From 753076b3da12b4b350b82ee4f0d31942b5e7648d Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Sat, 12 Oct 2024 18:26:26 -0500 Subject: [PATCH 01/14] refactor(frontend): Update heading in README.md --- README.md | 2 +- autogpt_platform/frontend/flutter | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 160000 autogpt_platform/frontend/flutter diff --git a/README.md b/README.md index 2371fa39f8d0..86958e335a59 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ https://github.com/user-attachments/assets/d04273a5-b36a-4a37-818e-f631ce72d603 This tutorial assumes you have Docker, VSCode, git and npm installed. -### 🧱 AutoGPT Frontend +### 🧱 **AutoGPT** Frontend The AutoGPT frontend is where users interact with our powerful AI automation platform. It offers multiple ways to engage with and leverage our AI agents. This is the interface where you'll bring your AI automation ideas to life: diff --git a/autogpt_platform/frontend/flutter b/autogpt_platform/frontend/flutter new file mode 160000 index 000000000000..2663184aa790 --- /dev/null +++ b/autogpt_platform/frontend/flutter @@ -0,0 +1 @@ +Subproject commit 2663184aa79047d0a33a14a3b607954f8fdd8730 From abe8b3e0293dda2d9343d83066d2f87189e86e9c Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Sat, 12 Oct 2024 18:50:29 -0500 Subject: [PATCH 02/14] Update all-projects.code-workspace --- .vscode/all-projects.code-workspace | 54 +++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/.vscode/all-projects.code-workspace b/.vscode/all-projects.code-workspace index aa3b35b10c78..ff575ab3ca26 100644 --- a/.vscode/all-projects.code-workspace +++ b/.vscode/all-projects.code-workspace @@ -58,5 +58,59 @@ "prisma.prisma", "qwtel.sqlite-viewer" ] + }, + "launch": { + "configurations": [ + { + "type": "pwa-msedge", + "name": "Launch Microsoft Edge", + "request": "launch", + "runtimeArgs": [ + 
"--remote-debugging-port=9222" + ], + "url": "https://thepathwaypioneer.com/", + "presentation": { + "hidden": true + } + }, + { + "type": "pwa-msedge", + "name": "Launch Microsoft Edge in headless mode", + "request": "launch", + "runtimeArgs": [ + "--headless", + "--remote-debugging-port=9222" + ], + "url": "https://thepathwaypioneer.com/", + "presentation": { + "hidden": true + } + }, + { + "type": "vscode-edge-devtools.debug", + "name": "Open Edge DevTools", + "request": "attach", + "url": "https://thepathwaypioneer.com/", + "presentation": { + "hidden": true + } + } + ], + "compounds": [ + { + "name": "Launch Edge Headless and attach DevTools", + "configurations": [ + "Launch Microsoft Edge in headless mode", + "Open Edge DevTools" + ] + }, + { + "name": "Launch Edge and attach DevTools", + "configurations": [ + "Launch Microsoft Edge", + "Open Edge DevTools" + ] + } + ] } } From 1e3fcf24934a860809fd0d6324a6044d5712c96d Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Sat, 12 Oct 2024 18:56:37 -0500 Subject: [PATCH 03/14] Update 1.bug.yml --- .github/ISSUE_TEMPLATE/1.bug.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml index a74ef2b38f9d..9d4d39db4b98 100644 --- a/.github/ISSUE_TEMPLATE/1.bug.yml +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -5,7 +5,6 @@ body: - type: markdown attributes: value: | - ### ⚠️ Before you continue * Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on * If you need help, you can ask in the [discussions] section or in [#tech-support] * **Thoroughly search the [existing issues] before creating a new one** @@ -170,4 +169,3 @@ body: ⚠️ The error log may contain personal data given to AutoGPT by you in prompt or input as well as any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. 
⚠️ validations: - required: false From 03371a6296fa6eeefe623930e82e7a014ae37355 Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Sat, 12 Oct 2024 18:57:22 -0500 Subject: [PATCH 04/14] Update 1.bug.yml --- .github/ISSUE_TEMPLATE/1.bug.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml index 9d4d39db4b98..229b37ed6bd3 100644 --- a/.github/ISSUE_TEMPLATE/1.bug.yml +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -169,3 +169,5 @@ body: ⚠️ The error log may contain personal data given to AutoGPT by you in prompt or input as well as any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️ validations: + required: true + From 1f698573259d5888fffa1635298b3af39869f854 Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Sat, 12 Oct 2024 18:57:27 -0500 Subject: [PATCH 05/14] Update 1.bug.yml --- .github/ISSUE_TEMPLATE/1.bug.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml index 229b37ed6bd3..977bd401e7d1 100644 --- a/.github/ISSUE_TEMPLATE/1.bug.yml +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -5,6 +5,7 @@ body: - type: markdown attributes: value: | + "### ⚠️ Before you continue" * Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on * If you need help, you can ask in the [discussions] section or in [#tech-support] * **Thoroughly search the [existing issues] before creating a new one** From 14ad65c9f1aff8f8579f2d7507fcaac40e91d461 Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Sat, 12 Oct 2024 18:57:27 -0500 Subject: [PATCH 06/14] Update 1.bug.yml --- .github/ISSUE_TEMPLATE/1.bug.yml | 1 + .github/workflows/blank.yml | 36 ++++++++++ classic/frontend/lib/main.dart | 4 +- 
.../frontend/lib/services/auth_service.dart | 1 + .../services/shared_preferences_service.dart | 2 +- .../lib/viewmodels/chat_viewmodel.dart | 2 +- .../lib/viewmodels/task_queue_viewmodel.dart | 6 +- .../lib/views/chat/agent_message_tile.dart | 5 +- .../lib/views/chat/chat_input_field.dart | 1 - .../views/settings/api_base_url_field.dart | 2 + .../lib/views/side_bar/side_bar_view.dart | 2 +- .../lib/views/skill_tree/tree_node_view.dart | 6 +- .../lib/views/task/new_task_button.dart | 6 +- .../lib/views/task/task_list_tile.dart | 1 + .../views/task/test_suite_detail_view.dart | 4 +- .../lib/views/task/test_suite_list_tile.dart | 1 + .../leaderboard_submission_button.dart | 4 +- .../leaderboard_submission_dialog.dart | 2 +- .../lib/views/task_queue/task_queue_view.dart | 20 +++--- .../views/task_queue/test_suite_button.dart | 2 +- classic/frontend/pubspec.lock | 68 +++++++++++++------ 21 files changed, 122 insertions(+), 54 deletions(-) create mode 100644 .github/workflows/blank.yml diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml index 229b37ed6bd3..977bd401e7d1 100644 --- a/.github/ISSUE_TEMPLATE/1.bug.yml +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -5,6 +5,7 @@ body: - type: markdown attributes: value: | + "### ⚠️ Before you continue" * Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on * If you need help, you can ask in the [discussions] section or in [#tech-support] * **Thoroughly search the [existing issues] before creating a new one** diff --git a/.github/workflows/blank.yml b/.github/workflows/blank.yml new file mode 100644 index 000000000000..7ec139694147 --- /dev/null +++ b/.github/workflows/blank.yml @@ -0,0 +1,36 @@ +# This is a basic workflow to help you get started with Actions + +name: Blank workflow + +# Controls when the action will run. 
+on: + # Triggers the workflow on push or pull request events but only for the $default-branch branch + push: + branches: [ $default-branch ] + pull_request: + branches: [ $default-branch ] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + # This workflow contains a single job called "build" + build: + # The type of runner that the job will run on + runs-on: ubuntu-latest + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v2 + + # Runs a single command using the runners shell + - name: Run a one-line script + run: echo Hello, world! + + # Runs a set of commands using the runners shell + - name: Run a multi-line script + run: | + echo Add other actions to build, + echo test, and deploy your project. diff --git a/classic/frontend/lib/main.dart b/classic/frontend/lib/main.dart index 8e0cac477600..bdb86047267c 100644 --- a/classic/frontend/lib/main.dart +++ b/classic/frontend/lib/main.dart @@ -75,6 +75,8 @@ void main() async { } class MyApp extends StatelessWidget { + const MyApp({super.key}); + @override Widget build(BuildContext context) { final taskService = Provider.of(context, listen: false); @@ -90,7 +92,7 @@ class MyApp extends StatelessWidget { stream: FirebaseAuth.instance.authStateChanges(), builder: (context, snapshot) { if (snapshot.connectionState == ConnectionState.waiting) { - return CircularProgressIndicator(); + return const CircularProgressIndicator(); } String hostname = Uri.base.host; diff --git a/classic/frontend/lib/services/auth_service.dart b/classic/frontend/lib/services/auth_service.dart index 2a3693638099..e79eaf118596 100644 --- a/classic/frontend/lib/services/auth_service.dart +++ b/classic/frontend/lib/services/auth_service.dart @@ -26,6 +26,7 @@ class 
AuthService { print("Error during Google Sign-In: $e"); return null; } + return null; } // Sign in with GitHub using redirect diff --git a/classic/frontend/lib/services/shared_preferences_service.dart b/classic/frontend/lib/services/shared_preferences_service.dart index df100316955f..a5413394781b 100644 --- a/classic/frontend/lib/services/shared_preferences_service.dart +++ b/classic/frontend/lib/services/shared_preferences_service.dart @@ -6,7 +6,7 @@ class SharedPreferencesService { static final SharedPreferencesService instance = SharedPreferencesService._privateConstructor(); - Future _prefs = SharedPreferences.getInstance(); + final Future _prefs = SharedPreferences.getInstance(); /// Sets a boolean [value] for the given [key] in the shared preferences. /// diff --git a/classic/frontend/lib/viewmodels/chat_viewmodel.dart b/classic/frontend/lib/viewmodels/chat_viewmodel.dart index bae2f3c5f634..4031ab34065a 100644 --- a/classic/frontend/lib/viewmodels/chat_viewmodel.dart +++ b/classic/frontend/lib/viewmodels/chat_viewmodel.dart @@ -95,7 +95,7 @@ class ChatViewModel with ChangeNotifier { } // Assign the chats list - if (chats.length > 0) { + if (chats.isNotEmpty) { _chats = chats; } diff --git a/classic/frontend/lib/viewmodels/task_queue_viewmodel.dart b/classic/frontend/lib/viewmodels/task_queue_viewmodel.dart index 4ca27037744e..1466c17636bf 100644 --- a/classic/frontend/lib/viewmodels/task_queue_viewmodel.dart +++ b/classic/frontend/lib/viewmodels/task_queue_viewmodel.dart @@ -87,10 +87,10 @@ class TaskQueueViewModel extends ChangeNotifier { node.id, skillTreeNodes, skillTreeEdges) .where((child) => !visited.contains(child.id)); - children.forEach((child) { + for (var child in children) { visited.add(child.id); stack.push(child); - }); + } } else { stack .pop(); // Remove the node if not all parents are visited, it will be re-added when its parents are visited @@ -232,7 +232,7 @@ class TaskQueueViewModel extends ChangeNotifier { benchmarkStatusMap[node] = 
successStatus ? BenchmarkTaskStatus.success : BenchmarkTaskStatus.failure; - await Future.delayed(Duration(seconds: 1)); + await Future.delayed(const Duration(seconds: 1)); notifyListeners(); testSuite.tests.add(task); diff --git a/classic/frontend/lib/views/chat/agent_message_tile.dart b/classic/frontend/lib/views/chat/agent_message_tile.dart index 029f487c0501..dedcc7971a7c 100644 --- a/classic/frontend/lib/views/chat/agent_message_tile.dart +++ b/classic/frontend/lib/views/chat/agent_message_tile.dart @@ -30,8 +30,7 @@ class _AgentMessageTileState extends State { bool containsMarkdown(String text) { // Regular expression to detect Markdown patterns like headers, bold, links, etc. final RegExp markdownPattern = RegExp( - r'(?:\*\*|__).*?(?:\*\*|__)|' + // Bold - r'(?:\*|_).*?(?:\*|_)|' + // Italic + r'(?:\*\*|__).*?(?:\*\*|__)|' r'(?:\*|_).*?(?:\*|_)|' + // Italic r'\[.*?\]\(.*?\)|' + // Links r'!\[.*?\]\(.*?\)|' + // Images r'#{1,6}.*|' + // Headers @@ -89,7 +88,7 @@ class _AgentMessageTileState extends State { styleSheet: MarkdownStyleSheet.fromTheme( Theme.of(context)) .copyWith( - blockquoteDecoration: BoxDecoration( + blockquoteDecoration: const BoxDecoration( color: Colors .black, // Background color for blockquotes border: Border( diff --git a/classic/frontend/lib/views/chat/chat_input_field.dart b/classic/frontend/lib/views/chat/chat_input_field.dart index 63afa2319264..637ceba53f94 100644 --- a/classic/frontend/lib/views/chat/chat_input_field.dart +++ b/classic/frontend/lib/views/chat/chat_input_field.dart @@ -1,7 +1,6 @@ import 'package:auto_gpt_flutter_client/viewmodels/chat_viewmodel.dart'; import 'package:auto_gpt_flutter_client/views/chat/continuous_mode_dialog.dart'; import 'package:flutter/material.dart'; -import 'package:shared_preferences/shared_preferences.dart'; class ChatInputField extends StatefulWidget { // Callback to be triggered when the send button is pressed diff --git a/classic/frontend/lib/views/settings/api_base_url_field.dart 
b/classic/frontend/lib/views/settings/api_base_url_field.dart index 6593757c4173..fb0995af3f02 100644 --- a/classic/frontend/lib/views/settings/api_base_url_field.dart +++ b/classic/frontend/lib/views/settings/api_base_url_field.dart @@ -5,6 +5,8 @@ import 'package:provider/provider.dart'; class ApiBaseUrlField extends StatelessWidget { final TextEditingController controller = TextEditingController(); + ApiBaseUrlField({super.key}); + @override Widget build(BuildContext context) { return Consumer( diff --git a/classic/frontend/lib/views/side_bar/side_bar_view.dart b/classic/frontend/lib/views/side_bar/side_bar_view.dart index e6610a7bd436..fdb28ce0b480 100644 --- a/classic/frontend/lib/views/side_bar/side_bar_view.dart +++ b/classic/frontend/lib/views/side_bar/side_bar_view.dart @@ -74,7 +74,7 @@ class SideBarView extends StatelessWidget { IconButton( splashRadius: 0.1, iconSize: 25, - icon: Icon(Icons.book, + icon: const Icon(Icons.book, color: Color.fromRGBO(50, 120, 123, 1)), onPressed: () => _launchURL( 'https://aiedge.medium.com/autogpt-forge-e3de53cc58ec'), diff --git a/classic/frontend/lib/views/skill_tree/tree_node_view.dart b/classic/frontend/lib/views/skill_tree/tree_node_view.dart index 83534144aafb..9d818491e04c 100644 --- a/classic/frontend/lib/views/skill_tree/tree_node_view.dart +++ b/classic/frontend/lib/views/skill_tree/tree_node_view.dart @@ -8,7 +8,7 @@ class TreeNodeView extends StatefulWidget { final SkillTreeNode node; final bool selected; - TreeNodeView({required this.node, this.selected = false}); + const TreeNodeView({super.key, required this.node, this.selected = false}); @override _TreeNodeViewState createState() => _TreeNodeViewState(); @@ -61,10 +61,10 @@ class _TreeNodeViewState extends State { ), ), ), - SizedBox(height: 4), + const SizedBox(height: 4), Text( widget.node.label, - style: TextStyle(fontSize: 12), + style: const TextStyle(fontSize: 12), ), ], ), diff --git a/classic/frontend/lib/views/task/new_task_button.dart 
b/classic/frontend/lib/views/task/new_task_button.dart index e4e6621d65b0..cb4d3ccda18a 100644 --- a/classic/frontend/lib/views/task/new_task_button.dart +++ b/classic/frontend/lib/views/task/new_task_button.dart @@ -18,12 +18,12 @@ class NewTaskButton extends StatelessWidget { onPressed: onPressed, style: ButtonStyle( // Set the button's background color - backgroundColor: MaterialStateProperty.all(Colors.white), + backgroundColor: WidgetStateProperty.all(Colors.white), // Set the button's edge - side: MaterialStateProperty.all( + side: WidgetStateProperty.all( const BorderSide(color: Colors.black, width: 0.5)), // Set the button's shape with rounded corners - shape: MaterialStateProperty.all( + shape: WidgetStateProperty.all( RoundedRectangleBorder( borderRadius: BorderRadius.circular(8.0), ), diff --git a/classic/frontend/lib/views/task/task_list_tile.dart b/classic/frontend/lib/views/task/task_list_tile.dart index 7025d06ad0a1..7edbd1a404e2 100644 --- a/classic/frontend/lib/views/task/task_list_tile.dart +++ b/classic/frontend/lib/views/task/task_list_tile.dart @@ -15,6 +15,7 @@ class TaskListTile extends StatelessWidget { this.selected = false, }) : super(key: key); + @override Widget build(BuildContext context) { // Determine the width of the TaskView double taskViewWidth = MediaQuery.of(context).size.width; diff --git a/classic/frontend/lib/views/task/test_suite_detail_view.dart b/classic/frontend/lib/views/task/test_suite_detail_view.dart index ff7d2833c594..508b42c9058f 100644 --- a/classic/frontend/lib/views/task/test_suite_detail_view.dart +++ b/classic/frontend/lib/views/task/test_suite_detail_view.dart @@ -26,9 +26,9 @@ class _TestSuiteDetailViewState extends State { appBar: AppBar( backgroundColor: Colors.grey, foregroundColor: Colors.black, - title: Text("${widget.testSuite.timestamp}"), + title: Text(widget.testSuite.timestamp), leading: IconButton( - icon: Icon(Icons.arrow_back), + icon: const Icon(Icons.arrow_back), onPressed: () => 
widget.viewModel.deselectTestSuite(), ), ), diff --git a/classic/frontend/lib/views/task/test_suite_list_tile.dart b/classic/frontend/lib/views/task/test_suite_list_tile.dart index 5e8e183a2ffe..9c84d7cf3b1e 100644 --- a/classic/frontend/lib/views/task/test_suite_list_tile.dart +++ b/classic/frontend/lib/views/task/test_suite_list_tile.dart @@ -11,6 +11,7 @@ class TestSuiteListTile extends StatelessWidget { required this.onTap, }) : super(key: key); + @override Widget build(BuildContext context) { // Determine the width of the TaskView double taskViewWidth = MediaQuery.of(context).size.width; diff --git a/classic/frontend/lib/views/task_queue/leaderboard_submission_button.dart b/classic/frontend/lib/views/task_queue/leaderboard_submission_button.dart index dfa9d917b3a3..4d4b7dd76041 100644 --- a/classic/frontend/lib/views/task_queue/leaderboard_submission_button.dart +++ b/classic/frontend/lib/views/task_queue/leaderboard_submission_button.dart @@ -5,8 +5,8 @@ class LeaderboardSubmissionButton extends StatelessWidget { final VoidCallback? 
onPressed; final bool isDisabled; - LeaderboardSubmissionButton( - {required this.onPressed, this.isDisabled = false}); + const LeaderboardSubmissionButton( + {super.key, required this.onPressed, this.isDisabled = false}); @override Widget build(BuildContext context) { diff --git a/classic/frontend/lib/views/task_queue/leaderboard_submission_dialog.dart b/classic/frontend/lib/views/task_queue/leaderboard_submission_dialog.dart index bf169d143db2..0dd28eab99a1 100644 --- a/classic/frontend/lib/views/task_queue/leaderboard_submission_dialog.dart +++ b/classic/frontend/lib/views/task_queue/leaderboard_submission_dialog.dart @@ -199,7 +199,7 @@ class _LeaderboardSubmissionDialogState ), ), ), - SizedBox(width: 8), + const SizedBox(width: 8), // Submit Button SizedBox( width: 106, diff --git a/classic/frontend/lib/views/task_queue/task_queue_view.dart b/classic/frontend/lib/views/task_queue/task_queue_view.dart index adda890ab82b..1c1fec0ab5de 100644 --- a/classic/frontend/lib/views/task_queue/task_queue_view.dart +++ b/classic/frontend/lib/views/task_queue/task_queue_view.dart @@ -9,6 +9,8 @@ import 'package:flutter/material.dart'; import 'package:provider/provider.dart'; class TaskQueueView extends StatelessWidget { + const TaskQueueView({super.key}); + @override Widget build(BuildContext context) { // TODO: This should be injected instead @@ -33,7 +35,7 @@ class TaskQueueView extends StatelessWidget { switch (viewModel.benchmarkStatusMap[node]) { case null: case BenchmarkTaskStatus.notStarted: - leadingWidget = CircleAvatar( + leadingWidget = const CircleAvatar( radius: 12, backgroundColor: Colors.grey, child: CircleAvatar( @@ -43,7 +45,7 @@ class TaskQueueView extends StatelessWidget { ); break; case BenchmarkTaskStatus.inProgress: - leadingWidget = SizedBox( + leadingWidget = const SizedBox( width: 24, height: 24, child: CircularProgressIndicator( @@ -52,7 +54,7 @@ class TaskQueueView extends StatelessWidget { ); break; case BenchmarkTaskStatus.success: - 
leadingWidget = CircleAvatar( + leadingWidget = const CircleAvatar( radius: 12, backgroundColor: Colors.green, child: CircleAvatar( @@ -62,7 +64,7 @@ class TaskQueueView extends StatelessWidget { ); break; case BenchmarkTaskStatus.failure: - leadingWidget = CircleAvatar( + leadingWidget = const CircleAvatar( radius: 12, backgroundColor: Colors.red, child: CircleAvatar( @@ -74,7 +76,7 @@ class TaskQueueView extends StatelessWidget { } return Container( - margin: EdgeInsets.fromLTRB(20, 5, 20, 5), + margin: const EdgeInsets.fromLTRB(20, 5, 20, 5), decoration: BoxDecoration( color: Colors.white, border: Border.all(color: Colors.black, width: 1), @@ -82,9 +84,9 @@ class TaskQueueView extends StatelessWidget { ), child: ListTile( leading: leadingWidget, - title: Center(child: Text('${node.label}')), + title: Center(child: Text(node.label)), subtitle: - Center(child: Text('${node.data.info.description}')), + Center(child: Text(node.data.info.description)), ), ); }, @@ -93,7 +95,7 @@ class TaskQueueView extends StatelessWidget { // Buttons at the bottom Padding( - padding: EdgeInsets.all(20), + padding: const EdgeInsets.all(20), child: Column( children: [ // TestSuiteButton @@ -120,7 +122,7 @@ class TaskQueueView extends StatelessWidget { viewModel.runBenchmark(chatViewModel, taskViewModel); }, ), - SizedBox(height: 8), // Gap of 8 points between buttons + const SizedBox(height: 8), // Gap of 8 points between buttons ], ), ), diff --git a/classic/frontend/lib/views/task_queue/test_suite_button.dart b/classic/frontend/lib/views/task_queue/test_suite_button.dart index c1ddca279d11..cfde442e673c 100644 --- a/classic/frontend/lib/views/task_queue/test_suite_button.dart +++ b/classic/frontend/lib/views/task_queue/test_suite_button.dart @@ -8,7 +8,7 @@ class TestSuiteButton extends StatefulWidget { final Function(String) onPlayPressed; String selectedOptionString; - TestSuiteButton({ + TestSuiteButton({super.key, this.isDisabled = false, required this.onOptionSelected, required 
this.onPlayPressed, diff --git a/classic/frontend/pubspec.lock b/classic/frontend/pubspec.lock index 145d8393797c..70adde6a00b5 100644 --- a/classic/frontend/pubspec.lock +++ b/classic/frontend/pubspec.lock @@ -53,10 +53,10 @@ packages: dependency: "direct main" description: name: collection - sha256: f092b211a4319e98e5ff58223576de6c2803db36221657b46c82574721240687 + sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a url: "https://pub.dev" source: hosted - version: "1.17.2" + version: "1.18.0" crypto: dependency: transitive description: @@ -304,6 +304,30 @@ packages: url: "https://pub.dev" source: hosted version: "0.6.7" + leak_tracker: + dependency: transitive + description: + name: leak_tracker + sha256: "3f87a60e8c63aecc975dda1ceedbc8f24de75f09e4856ea27daf8958f2f0ce05" + url: "https://pub.dev" + source: hosted + version: "10.0.5" + leak_tracker_flutter_testing: + dependency: transitive + description: + name: leak_tracker_flutter_testing + sha256: "932549fb305594d82d7183ecd9fa93463e9914e1b67cacc34bc40906594a1806" + url: "https://pub.dev" + source: hosted + version: "3.0.5" + leak_tracker_testing: + dependency: transitive + description: + name: leak_tracker_testing + sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" + url: "https://pub.dev" + source: hosted + version: "3.0.1" lints: dependency: transitive description: @@ -324,26 +348,26 @@ packages: dependency: transitive description: name: matcher - sha256: "1803e76e6653768d64ed8ff2e1e67bea3ad4b923eb5c56a295c3e634bad5960e" + sha256: d2323aa2060500f906aa31a895b4030b6da3ebdcc5619d14ce1aada65cd161cb url: "https://pub.dev" source: hosted - version: "0.12.16" + version: "0.12.16+1" material_color_utilities: dependency: transitive description: name: material_color_utilities - sha256: "9528f2f296073ff54cb9fee677df673ace1218163c3bc7628093e7eed5203d41" + sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec url: "https://pub.dev" source: hosted - version: 
"0.5.0" + version: "0.11.1" meta: dependency: transitive description: name: meta - sha256: "3c74dbf8763d36539f114c799d8a2d87343b5067e9d796ca22b5eb8437090ee3" + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 url: "https://pub.dev" source: hosted - version: "1.9.1" + version: "1.15.0" nested: dependency: transitive description: @@ -356,10 +380,10 @@ packages: dependency: transitive description: name: path - sha256: "8829d8a55c13fc0e37127c29fedf290c102f4e40ae94ada574091fe0ff96c917" + sha256: "087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" url: "https://pub.dev" source: hosted - version: "1.8.3" + version: "1.9.0" path_provider_linux: dependency: transitive description: @@ -497,18 +521,18 @@ packages: dependency: transitive description: name: stack_trace - sha256: c3c7d8edb15bee7f0f74debd4b9c5f3c2ea86766fe4178eb2a18eb30a0bdaed5 + sha256: "73713990125a6d93122541237550ee3352a2d84baad52d375a4cad2eb9b7ce0b" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.11.1" stream_channel: dependency: transitive description: name: stream_channel - sha256: "83615bee9045c1d322bbbd1ba209b7a749c2cbcdcb3fdd1df8eb488b3279c1c8" + sha256: ba2aa5d8cc609d96bbb2899c28934f9e1af5cddbd60a827822ea467161eb54e7 url: "https://pub.dev" source: hosted - version: "2.1.1" + version: "2.1.2" string_scanner: dependency: transitive description: @@ -529,10 +553,10 @@ packages: dependency: transitive description: name: test_api - sha256: "75760ffd7786fffdfb9597c35c5b27eaeec82be8edfb6d71d32651128ed7aab8" + sha256: "5b8a98dafc4d5c4c9c72d8b31ab2b23fc13422348d2997120294d3bac86b4ddb" url: "https://pub.dev" source: hosted - version: "0.6.0" + version: "0.7.2" typed_data: dependency: transitive description: @@ -621,14 +645,14 @@ packages: url: "https://pub.dev" source: hosted version: "2.1.4" - web: + vm_service: dependency: transitive description: - name: web - sha256: dc8ccd225a2005c1be616fe02951e2e342092edf968cf0844220383757ef8f10 + name: vm_service 
+ sha256: "5c5f338a667b4c644744b661f309fb8080bb94b18a7e91ef1dbd343bed00ed6d" url: "https://pub.dev" source: hosted - version: "0.1.4-beta" + version: "14.2.5" win32: dependency: transitive description: @@ -646,5 +670,5 @@ packages: source: hosted version: "1.0.3" sdks: - dart: ">=3.1.0-185.0.dev <4.0.0" - flutter: ">=3.10.0" + dart: ">=3.3.0 <4.0.0" + flutter: ">=3.18.0-18.0.pre.54" From 3e3654499dab406128c6ec90f98c30b256063f9d Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Tue, 15 Oct 2024 15:23:10 -0500 Subject: [PATCH 07/14] Fix bug in 1.bug.yml file --- classic/frontend/pubspec.lock | 674 ---------------------------------- 1 file changed, 674 deletions(-) delete mode 100644 classic/frontend/pubspec.lock diff --git a/classic/frontend/pubspec.lock b/classic/frontend/pubspec.lock deleted file mode 100644 index 70adde6a00b5..000000000000 --- a/classic/frontend/pubspec.lock +++ /dev/null @@ -1,674 +0,0 @@ -# Generated by pub -# See https://dart.dev/tools/pub/glossary#lockfile -packages: - _flutterfire_internals: - dependency: transitive - description: - name: _flutterfire_internals - sha256: "2d8e8e123ca3675625917f535fcc0d3a50092eef44334168f9b18adc050d4c6e" - url: "https://pub.dev" - source: hosted - version: "1.3.6" - args: - dependency: transitive - description: - name: args - sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596 - url: "https://pub.dev" - source: hosted - version: "2.4.2" - async: - dependency: transitive - description: - name: async - sha256: "947bfcf187f74dbc5e146c9eb9c0f10c9f8b30743e341481c1e2ed3ecc18c20c" - url: "https://pub.dev" - source: hosted - version: "2.11.0" - boolean_selector: - dependency: transitive - description: - name: boolean_selector - sha256: "6cfb5af12253eaf2b368f07bacc5a80d1301a071c73360d746b7f2e32d762c66" - url: "https://pub.dev" - source: hosted - version: "2.1.1" - characters: - dependency: transitive - description: - name: characters - sha256: 
"04a925763edad70e8443c99234dc3328f442e811f1d8fd1a72f1c8ad0f69a605" - url: "https://pub.dev" - source: hosted - version: "1.3.0" - clock: - dependency: transitive - description: - name: clock - sha256: cb6d7f03e1de671e34607e909a7213e31d7752be4fb66a86d29fe1eb14bfb5cf - url: "https://pub.dev" - source: hosted - version: "1.1.1" - collection: - dependency: "direct main" - description: - name: collection - sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a - url: "https://pub.dev" - source: hosted - version: "1.18.0" - crypto: - dependency: transitive - description: - name: crypto - sha256: ff625774173754681d66daaf4a448684fb04b78f902da9cb3d308c19cc5e8bab - url: "https://pub.dev" - source: hosted - version: "3.0.3" - cupertino_icons: - dependency: "direct main" - description: - name: cupertino_icons - sha256: e35129dc44c9118cee2a5603506d823bab99c68393879edb440e0090d07586be - url: "https://pub.dev" - source: hosted - version: "1.0.5" - fake_async: - dependency: transitive - description: - name: fake_async - sha256: "511392330127add0b769b75a987850d136345d9227c6b94c96a04cf4a391bf78" - url: "https://pub.dev" - source: hosted - version: "1.3.1" - ffi: - dependency: transitive - description: - name: ffi - sha256: "7bf0adc28a23d395f19f3f1eb21dd7cfd1dd9f8e1c50051c069122e6853bc878" - url: "https://pub.dev" - source: hosted - version: "2.1.0" - file: - dependency: transitive - description: - name: file - sha256: "1b92bec4fc2a72f59a8e15af5f52cd441e4a7860b49499d69dfa817af20e925d" - url: "https://pub.dev" - source: hosted - version: "6.1.4" - firebase_analytics: - dependency: "direct main" - description: - name: firebase_analytics - sha256: c35213b72c9dbab6a20954bb968ed70e7d9e0ea3acb3426b9d4f4a51a522cdb4 - url: "https://pub.dev" - source: hosted - version: "10.5.0" - firebase_analytics_platform_interface: - dependency: transitive - description: - name: firebase_analytics_platform_interface - sha256: 
"9a8bdbf5345de01f7f1905c9ab6f9bff0b7fd739620d68c16b3b3b639b487dc3" - url: "https://pub.dev" - source: hosted - version: "3.7.0" - firebase_analytics_web: - dependency: transitive - description: - name: firebase_analytics_web - sha256: da79ab9c1e32c389cd6224939a0437a9e074783e3f2b51e9dc6d850d769d9af8 - url: "https://pub.dev" - source: hosted - version: "0.5.5" - firebase_auth: - dependency: "direct main" - description: - name: firebase_auth - sha256: "6d9be853426ab686d68076b8007ac29b2c31e7d549444a45b5c3fe1abc249fb0" - url: "https://pub.dev" - source: hosted - version: "4.9.0" - firebase_auth_platform_interface: - dependency: transitive - description: - name: firebase_auth_platform_interface - sha256: "2946cfdc17f925fa9771dd0ba3ce9dd2d019100a8685d0557c161f7786ea9b14" - url: "https://pub.dev" - source: hosted - version: "6.18.0" - firebase_auth_web: - dependency: transitive - description: - name: firebase_auth_web - sha256: d8972d754702a3f4881184706b8056e2837d0dae91613a43b988c960b8e0d988 - url: "https://pub.dev" - source: hosted - version: "5.8.0" - firebase_core: - dependency: "direct main" - description: - name: firebase_core - sha256: "675c209c94a1817649137cbd113fc4c9ae85e48d03dd578629abbec6d8a4d93d" - url: "https://pub.dev" - source: hosted - version: "2.16.0" - firebase_core_platform_interface: - dependency: transitive - description: - name: firebase_core_platform_interface - sha256: b63e3be6c96ef5c33bdec1aab23c91eb00696f6452f0519401d640938c94cba2 - url: "https://pub.dev" - source: hosted - version: "4.8.0" - firebase_core_web: - dependency: transitive - description: - name: firebase_core_web - sha256: e8c408923cd3a25bd342c576a114f2126769cd1a57106a4edeaa67ea4a84e962 - url: "https://pub.dev" - source: hosted - version: "2.8.0" - flutter: - dependency: "direct main" - description: flutter - source: sdk - version: "0.0.0" - flutter_highlight: - dependency: "direct main" - description: - name: flutter_highlight - sha256: 
"7b96333867aa07e122e245c033b8ad622e4e3a42a1a2372cbb098a2541d8782c" - url: "https://pub.dev" - source: hosted - version: "0.7.0" - flutter_lints: - dependency: "direct dev" - description: - name: flutter_lints - sha256: "2118df84ef0c3ca93f96123a616ae8540879991b8b57af2f81b76a7ada49b2a4" - url: "https://pub.dev" - source: hosted - version: "2.0.2" - flutter_markdown: - dependency: "direct main" - description: - name: flutter_markdown - sha256: "8afc9a6aa6d8e8063523192ba837149dbf3d377a37c0b0fc579149a1fbd4a619" - url: "https://pub.dev" - source: hosted - version: "0.6.18" - flutter_test: - dependency: "direct dev" - description: flutter - source: sdk - version: "0.0.0" - flutter_web_plugins: - dependency: transitive - description: flutter - source: sdk - version: "0.0.0" - fluttertoast: - dependency: "direct main" - description: - name: fluttertoast - sha256: "474f7d506230897a3cd28c965ec21c5328ae5605fc9c400cd330e9e9d6ac175c" - url: "https://pub.dev" - source: hosted - version: "8.2.2" - google_identity_services_web: - dependency: transitive - description: - name: google_identity_services_web - sha256: "554748f2478619076128152c58905620d10f9c7fc270ff1d3a9675f9f53838ed" - url: "https://pub.dev" - source: hosted - version: "0.2.1+1" - google_sign_in: - dependency: "direct main" - description: - name: google_sign_in - sha256: f45038d27bcad37498f282295ae97eece23c9349fc16649154067b87b9f1fd03 - url: "https://pub.dev" - source: hosted - version: "6.1.5" - google_sign_in_android: - dependency: transitive - description: - name: google_sign_in_android - sha256: "8d76099cb220d4f10c7e3c24492814c733f48ecb574c45c0ccadf5d5e50b012d" - url: "https://pub.dev" - source: hosted - version: "6.1.19" - google_sign_in_ios: - dependency: transitive - description: - name: google_sign_in_ios - sha256: "8edfde9698b5951f3d02632eceb39cc283865c3cff0b03216bf951089f10345b" - url: "https://pub.dev" - source: hosted - version: "5.6.3" - google_sign_in_platform_interface: - dependency: transitive - 
description: - name: google_sign_in_platform_interface - sha256: "35ceee5f0eadc1c07b0b4af7553246e315c901facbb7d3dadf734ba2693ceec4" - url: "https://pub.dev" - source: hosted - version: "2.4.2" - google_sign_in_web: - dependency: transitive - description: - name: google_sign_in_web - sha256: b48263e47f9493ba4120ccdfffe7412549ee297e82b97be9b8fa16ea8919ffbe - url: "https://pub.dev" - source: hosted - version: "0.12.0+4" - graphview: - dependency: "direct main" - description: - name: graphview - sha256: bdba183583b23c30c71edea09ad5f0beef612572d3e39e855467a925bd08392f - url: "https://pub.dev" - source: hosted - version: "1.2.0" - highlight: - dependency: transitive - description: - name: highlight - sha256: "5353a83ffe3e3eca7df0abfb72dcf3fa66cc56b953728e7113ad4ad88497cf21" - url: "https://pub.dev" - source: hosted - version: "0.7.0" - http: - dependency: "direct main" - description: - name: http - sha256: "759d1a329847dd0f39226c688d3e06a6b8679668e350e2891a6474f8b4bb8525" - url: "https://pub.dev" - source: hosted - version: "1.1.0" - http_parser: - dependency: transitive - description: - name: http_parser - sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" - url: "https://pub.dev" - source: hosted - version: "4.0.2" - js: - dependency: transitive - description: - name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 - url: "https://pub.dev" - source: hosted - version: "0.6.7" - leak_tracker: - dependency: transitive - description: - name: leak_tracker - sha256: "3f87a60e8c63aecc975dda1ceedbc8f24de75f09e4856ea27daf8958f2f0ce05" - url: "https://pub.dev" - source: hosted - version: "10.0.5" - leak_tracker_flutter_testing: - dependency: transitive - description: - name: leak_tracker_flutter_testing - sha256: "932549fb305594d82d7183ecd9fa93463e9914e1b67cacc34bc40906594a1806" - url: "https://pub.dev" - source: hosted - version: "3.0.5" - leak_tracker_testing: - dependency: transitive - description: - name: 
leak_tracker_testing - sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" - url: "https://pub.dev" - source: hosted - version: "3.0.1" - lints: - dependency: transitive - description: - name: lints - sha256: "6b0206b0bf4f04961fc5438198ccb3a885685cd67d4d4a32cc20ad7f8adbe015" - url: "https://pub.dev" - source: hosted - version: "2.1.0" - markdown: - dependency: transitive - description: - name: markdown - sha256: acf35edccc0463a9d7384e437c015a3535772e09714cf60e07eeef3a15870dcd - url: "https://pub.dev" - source: hosted - version: "7.1.1" - matcher: - dependency: transitive - description: - name: matcher - sha256: d2323aa2060500f906aa31a895b4030b6da3ebdcc5619d14ce1aada65cd161cb - url: "https://pub.dev" - source: hosted - version: "0.12.16+1" - material_color_utilities: - dependency: transitive - description: - name: material_color_utilities - sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec - url: "https://pub.dev" - source: hosted - version: "0.11.1" - meta: - dependency: transitive - description: - name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 - url: "https://pub.dev" - source: hosted - version: "1.15.0" - nested: - dependency: transitive - description: - name: nested - sha256: "03bac4c528c64c95c722ec99280375a6f2fc708eec17c7b3f07253b626cd2a20" - url: "https://pub.dev" - source: hosted - version: "1.0.0" - path: - dependency: transitive - description: - name: path - sha256: "087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" - url: "https://pub.dev" - source: hosted - version: "1.9.0" - path_provider_linux: - dependency: transitive - description: - name: path_provider_linux - sha256: f7a1fe3a634fe7734c8d3f2766ad746ae2a2884abe22e241a8b301bf5cac3279 - url: "https://pub.dev" - source: hosted - version: "2.2.1" - path_provider_platform_interface: - dependency: transitive - description: - name: path_provider_platform_interface - sha256: 
"94b1e0dd80970c1ce43d5d4e050a9918fce4f4a775e6142424c30a29a363265c" - url: "https://pub.dev" - source: hosted - version: "2.1.1" - path_provider_windows: - dependency: transitive - description: - name: path_provider_windows - sha256: "8bc9f22eee8690981c22aa7fc602f5c85b497a6fb2ceb35ee5a5e5ed85ad8170" - url: "https://pub.dev" - source: hosted - version: "2.2.1" - platform: - dependency: transitive - description: - name: platform - sha256: ae68c7bfcd7383af3629daafb32fb4e8681c7154428da4febcff06200585f102 - url: "https://pub.dev" - source: hosted - version: "3.1.2" - plugin_platform_interface: - dependency: transitive - description: - name: plugin_platform_interface - sha256: da3fdfeccc4d4ff2da8f8c556704c08f912542c5fb3cf2233ed75372384a034d - url: "https://pub.dev" - source: hosted - version: "2.1.6" - provider: - dependency: "direct main" - description: - name: provider - sha256: cdbe7530b12ecd9eb455bdaa2fcb8d4dad22e80b8afb4798b41479d5ce26847f - url: "https://pub.dev" - source: hosted - version: "6.0.5" - quiver: - dependency: transitive - description: - name: quiver - sha256: b1c1ac5ce6688d77f65f3375a9abb9319b3cb32486bdc7a1e0fdf004d7ba4e47 - url: "https://pub.dev" - source: hosted - version: "3.2.1" - shared_preferences: - dependency: "direct main" - description: - name: shared_preferences - sha256: b7f41bad7e521d205998772545de63ff4e6c97714775902c199353f8bf1511ac - url: "https://pub.dev" - source: hosted - version: "2.2.1" - shared_preferences_android: - dependency: transitive - description: - name: shared_preferences_android - sha256: "8568a389334b6e83415b6aae55378e158fbc2314e074983362d20c562780fb06" - url: "https://pub.dev" - source: hosted - version: "2.2.1" - shared_preferences_foundation: - dependency: transitive - description: - name: shared_preferences_foundation - sha256: "7bf53a9f2d007329ee6f3df7268fd498f8373602f943c975598bbb34649b62a7" - url: "https://pub.dev" - source: hosted - version: "2.3.4" - shared_preferences_linux: - dependency: transitive - 
description: - name: shared_preferences_linux - sha256: c2eb5bf57a2fe9ad6988121609e47d3e07bb3bdca5b6f8444e4cf302428a128a - url: "https://pub.dev" - source: hosted - version: "2.3.1" - shared_preferences_platform_interface: - dependency: transitive - description: - name: shared_preferences_platform_interface - sha256: d4ec5fc9ebb2f2e056c617112aa75dcf92fc2e4faaf2ae999caa297473f75d8a - url: "https://pub.dev" - source: hosted - version: "2.3.1" - shared_preferences_web: - dependency: transitive - description: - name: shared_preferences_web - sha256: d762709c2bbe80626ecc819143013cc820fa49ca5e363620ee20a8b15a3e3daf - url: "https://pub.dev" - source: hosted - version: "2.2.1" - shared_preferences_windows: - dependency: transitive - description: - name: shared_preferences_windows - sha256: f763a101313bd3be87edffe0560037500967de9c394a714cd598d945517f694f - url: "https://pub.dev" - source: hosted - version: "2.3.1" - sky_engine: - dependency: transitive - description: flutter - source: sdk - version: "0.0.99" - source_span: - dependency: transitive - description: - name: source_span - sha256: "53e943d4206a5e30df338fd4c6e7a077e02254531b138a15aec3bd143c1a8b3c" - url: "https://pub.dev" - source: hosted - version: "1.10.0" - sprintf: - dependency: transitive - description: - name: sprintf - sha256: "1fc9ffe69d4df602376b52949af107d8f5703b77cda567c4d7d86a0693120f23" - url: "https://pub.dev" - source: hosted - version: "7.0.0" - stack_trace: - dependency: transitive - description: - name: stack_trace - sha256: "73713990125a6d93122541237550ee3352a2d84baad52d375a4cad2eb9b7ce0b" - url: "https://pub.dev" - source: hosted - version: "1.11.1" - stream_channel: - dependency: transitive - description: - name: stream_channel - sha256: ba2aa5d8cc609d96bbb2899c28934f9e1af5cddbd60a827822ea467161eb54e7 - url: "https://pub.dev" - source: hosted - version: "2.1.2" - string_scanner: - dependency: transitive - description: - name: string_scanner - sha256: 
"556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" - url: "https://pub.dev" - source: hosted - version: "1.2.0" - term_glyph: - dependency: transitive - description: - name: term_glyph - sha256: a29248a84fbb7c79282b40b8c72a1209db169a2e0542bce341da992fe1bc7e84 - url: "https://pub.dev" - source: hosted - version: "1.2.1" - test_api: - dependency: transitive - description: - name: test_api - sha256: "5b8a98dafc4d5c4c9c72d8b31ab2b23fc13422348d2997120294d3bac86b4ddb" - url: "https://pub.dev" - source: hosted - version: "0.7.2" - typed_data: - dependency: transitive - description: - name: typed_data - sha256: facc8d6582f16042dd49f2463ff1bd6e2c9ef9f3d5da3d9b087e244a7b564b3c - url: "https://pub.dev" - source: hosted - version: "1.3.2" - url_launcher: - dependency: "direct main" - description: - name: url_launcher - sha256: "47e208a6711459d813ba18af120d9663c20bdf6985d6ad39fe165d2538378d27" - url: "https://pub.dev" - source: hosted - version: "6.1.14" - url_launcher_android: - dependency: transitive - description: - name: url_launcher_android - sha256: b04af59516ab45762b2ca6da40fa830d72d0f6045cd97744450b73493fa76330 - url: "https://pub.dev" - source: hosted - version: "6.1.0" - url_launcher_ios: - dependency: transitive - description: - name: url_launcher_ios - sha256: "7c65021d5dee51813d652357bc65b8dd4a6177082a9966bc8ba6ee477baa795f" - url: "https://pub.dev" - source: hosted - version: "6.1.5" - url_launcher_linux: - dependency: transitive - description: - name: url_launcher_linux - sha256: b651aad005e0cb06a01dbd84b428a301916dc75f0e7ea6165f80057fee2d8e8e - url: "https://pub.dev" - source: hosted - version: "3.0.6" - url_launcher_macos: - dependency: transitive - description: - name: url_launcher_macos - sha256: b55486791f666e62e0e8ff825e58a023fd6b1f71c49926483f1128d3bbd8fe88 - url: "https://pub.dev" - source: hosted - version: "3.0.7" - url_launcher_platform_interface: - dependency: transitive - description: - name: url_launcher_platform_interface - sha256: 
"95465b39f83bfe95fcb9d174829d6476216f2d548b79c38ab2506e0458787618" - url: "https://pub.dev" - source: hosted - version: "2.1.5" - url_launcher_web: - dependency: transitive - description: - name: url_launcher_web - sha256: ba140138558fcc3eead51a1c42e92a9fb074a1b1149ed3c73e66035b2ccd94f2 - url: "https://pub.dev" - source: hosted - version: "2.0.19" - url_launcher_windows: - dependency: transitive - description: - name: url_launcher_windows - sha256: "95fef3129dc7cfaba2bc3d5ba2e16063bb561fc6d78e63eee16162bc70029069" - url: "https://pub.dev" - source: hosted - version: "3.0.8" - uuid: - dependency: "direct main" - description: - name: uuid - sha256: e03928880bdbcbf496fb415573f5ab7b1ea99b9b04f669c01104d085893c3134 - url: "https://pub.dev" - source: hosted - version: "4.0.0" - vector_math: - dependency: transitive - description: - name: vector_math - sha256: "80b3257d1492ce4d091729e3a67a60407d227c27241d6927be0130c98e741803" - url: "https://pub.dev" - source: hosted - version: "2.1.4" - vm_service: - dependency: transitive - description: - name: vm_service - sha256: "5c5f338a667b4c644744b661f309fb8080bb94b18a7e91ef1dbd343bed00ed6d" - url: "https://pub.dev" - source: hosted - version: "14.2.5" - win32: - dependency: transitive - description: - name: win32 - sha256: "9e82a402b7f3d518fb9c02d0e9ae45952df31b9bf34d77baf19da2de03fc2aaa" - url: "https://pub.dev" - source: hosted - version: "5.0.7" - xdg_directories: - dependency: transitive - description: - name: xdg_directories - sha256: "589ada45ba9e39405c198fe34eb0f607cddb2108527e658136120892beac46d2" - url: "https://pub.dev" - source: hosted - version: "1.0.3" -sdks: - dart: ">=3.3.0 <4.0.0" - flutter: ">=3.18.0-18.0.pre.54" From 7e7028cf5629e091de842eeb7d7d4815a86ff1d4 Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Thu, 17 Oct 2024 07:43:23 -0500 Subject: [PATCH 08/14] master1 --- arena/TestAgent.json | 6 ++++++ autogpt_platform/frontend/flutter | 2 +- 2 files changed, 7 
insertions(+), 1 deletion(-) create mode 100644 arena/TestAgent.json diff --git a/arena/TestAgent.json b/arena/TestAgent.json new file mode 100644 index 000000000000..02c5b1b84047 --- /dev/null +++ b/arena/TestAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Nilllas/AutoGPT", + "timestamp": "2023-10-20T11:27:15.343842", + "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/autogpt_platform/frontend/flutter b/autogpt_platform/frontend/flutter index 2663184aa790..f67fa22d6383 160000 --- a/autogpt_platform/frontend/flutter +++ b/autogpt_platform/frontend/flutter @@ -1 +1 @@ -Subproject commit 2663184aa79047d0a33a14a3b607954f8fdd8730 +Subproject commit f67fa22d63833d855b75b80a7948fb6a0688385d From 4f8ae1998ba44fcf9b1dd142d3582cb0880f2fdb Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Thu, 17 Oct 2024 07:45:04 -0500 Subject: [PATCH 09/14] Fix bug in 1.bug.yml file --- classic/frontend/pubspec.lock | 674 ++++++++++++++++++++++++++++++++++ 1 file changed, 674 insertions(+) create mode 100644 classic/frontend/pubspec.lock diff --git a/classic/frontend/pubspec.lock b/classic/frontend/pubspec.lock new file mode 100644 index 000000000000..1f65a3a4bea4 --- /dev/null +++ b/classic/frontend/pubspec.lock @@ -0,0 +1,674 @@ +# Generated by pub +# See https://dart.dev/tools/pub/glossary#lockfile +packages: + _flutterfire_internals: + dependency: transitive + description: + name: _flutterfire_internals + sha256: "37a42d06068e2fe3deddb2da079a8c4d105f241225ba27b7122b37e9865fd8f7" + url: "https://pub.dev" + source: hosted + version: "1.3.35" + args: + dependency: transitive + description: + name: args + sha256: bf9f5caeea8d8fe6721a9c358dd8a5c1947b27f1cfaa18b39c301273594919e6 + url: "https://pub.dev" + source: hosted + version: "2.6.0" + async: + dependency: transitive + description: + name: async + sha256: 
"947bfcf187f74dbc5e146c9eb9c0f10c9f8b30743e341481c1e2ed3ecc18c20c" + url: "https://pub.dev" + source: hosted + version: "2.11.0" + boolean_selector: + dependency: transitive + description: + name: boolean_selector + sha256: "6cfb5af12253eaf2b368f07bacc5a80d1301a071c73360d746b7f2e32d762c66" + url: "https://pub.dev" + source: hosted + version: "2.1.1" + characters: + dependency: transitive + description: + name: characters + sha256: "04a925763edad70e8443c99234dc3328f442e811f1d8fd1a72f1c8ad0f69a605" + url: "https://pub.dev" + source: hosted + version: "1.3.0" + clock: + dependency: transitive + description: + name: clock + sha256: cb6d7f03e1de671e34607e909a7213e31d7752be4fb66a86d29fe1eb14bfb5cf + url: "https://pub.dev" + source: hosted + version: "1.1.1" + collection: + dependency: "direct main" + description: + name: collection + sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a + url: "https://pub.dev" + source: hosted + version: "1.18.0" + crypto: + dependency: transitive + description: + name: crypto + sha256: "1e445881f28f22d6140f181e07737b22f1e099a5e1ff94b0af2f9e4a463f4855" + url: "https://pub.dev" + source: hosted + version: "3.0.6" + cupertino_icons: + dependency: "direct main" + description: + name: cupertino_icons + sha256: ba631d1c7f7bef6b729a622b7b752645a2d076dba9976925b8f25725a30e1ee6 + url: "https://pub.dev" + source: hosted + version: "1.0.8" + fake_async: + dependency: transitive + description: + name: fake_async + sha256: "511392330127add0b769b75a987850d136345d9227c6b94c96a04cf4a391bf78" + url: "https://pub.dev" + source: hosted + version: "1.3.1" + ffi: + dependency: transitive + description: + name: ffi + sha256: "16ed7b077ef01ad6170a3d0c57caa4a112a38d7a2ed5602e0aca9ca6f3d98da6" + url: "https://pub.dev" + source: hosted + version: "2.1.3" + file: + dependency: transitive + description: + name: file + sha256: a3b4f84adafef897088c160faf7dfffb7696046cb13ae90b508c2cbc95d3b8d4 + url: "https://pub.dev" + source: hosted + version: 
"7.0.1" + firebase_analytics: + dependency: "direct main" + description: + name: firebase_analytics + sha256: dbf1e7ab22cfb1f4a4adb103b46a26276b4edc593d4a78ef6fb942bafc92e035 + url: "https://pub.dev" + source: hosted + version: "10.10.7" + firebase_analytics_platform_interface: + dependency: transitive + description: + name: firebase_analytics_platform_interface + sha256: "3729b74f8cf1d974a27ba70332ecb55ff5ff560edc8164a6469f4a055b429c37" + url: "https://pub.dev" + source: hosted + version: "3.10.8" + firebase_analytics_web: + dependency: transitive + description: + name: firebase_analytics_web + sha256: "019cd7eee74254d33fbd2e29229367ce33063516bf6b3258a341d89e3b0f1655" + url: "https://pub.dev" + source: hosted + version: "0.5.7+7" + firebase_auth: + dependency: "direct main" + description: + name: firebase_auth + sha256: "279b2773ff61afd9763202cb5582e2b995ee57419d826b9af6517302a59b672f" + url: "https://pub.dev" + source: hosted + version: "4.16.0" + firebase_auth_platform_interface: + dependency: transitive + description: + name: firebase_auth_platform_interface + sha256: a0270e1db3b2098a14cb2a2342b3cd2e7e458e0c391b1f64f6f78b14296ec093 + url: "https://pub.dev" + source: hosted + version: "7.3.0" + firebase_auth_web: + dependency: transitive + description: + name: firebase_auth_web + sha256: c7b1379ccef7abf4b6816eede67a868c44142198e42350f51c01d8fc03f95a7d + url: "https://pub.dev" + source: hosted + version: "5.8.13" + firebase_core: + dependency: "direct main" + description: + name: firebase_core + sha256: "26de145bb9688a90962faec6f838247377b0b0d32cc0abecd9a4e43525fc856c" + url: "https://pub.dev" + source: hosted + version: "2.32.0" + firebase_core_platform_interface: + dependency: transitive + description: + name: firebase_core_platform_interface + sha256: e30da58198a6d4b49d5bce4e852f985c32cb10db329ebef9473db2b9f09ce810 + url: "https://pub.dev" + source: hosted + version: "5.3.0" + firebase_core_web: + dependency: transitive + description: + name: firebase_core_web 
+ sha256: f967a7138f5d2ffb1ce15950e2a382924239eaa521150a8f144af34e68b3b3e5 + url: "https://pub.dev" + source: hosted + version: "2.18.1" + fixnum: + dependency: transitive + description: + name: fixnum + sha256: b6dc7065e46c974bc7c5f143080a6764ec7a4be6da1285ececdc37be96de53be + url: "https://pub.dev" + source: hosted + version: "1.1.1" + flutter: + dependency: "direct main" + description: flutter + source: sdk + version: "0.0.0" + flutter_highlight: + dependency: "direct main" + description: + name: flutter_highlight + sha256: "7b96333867aa07e122e245c033b8ad622e4e3a42a1a2372cbb098a2541d8782c" + url: "https://pub.dev" + source: hosted + version: "0.7.0" + flutter_lints: + dependency: "direct dev" + description: + name: flutter_lints + sha256: a25a15ebbdfc33ab1cd26c63a6ee519df92338a9c10f122adda92938253bef04 + url: "https://pub.dev" + source: hosted + version: "2.0.3" + flutter_markdown: + dependency: "direct main" + description: + name: flutter_markdown + sha256: "04c4722cc36ec5af38acc38ece70d22d3c2123c61305d555750a091517bbe504" + url: "https://pub.dev" + source: hosted + version: "0.6.23" + flutter_test: + dependency: "direct dev" + description: flutter + source: sdk + version: "0.0.0" + flutter_web_plugins: + dependency: transitive + description: flutter + source: sdk + version: "0.0.0" + fluttertoast: + dependency: "direct main" + description: + name: fluttertoast + sha256: "95f349437aeebe524ef7d6c9bde3e6b4772717cf46a0eb6a3ceaddc740b297cc" + url: "https://pub.dev" + source: hosted + version: "8.2.8" + google_identity_services_web: + dependency: transitive + description: + name: google_identity_services_web + sha256: "5be191523702ba8d7a01ca97c17fca096822ccf246b0a9f11923a6ded06199b6" + url: "https://pub.dev" + source: hosted + version: "0.3.1+4" + google_sign_in: + dependency: "direct main" + description: + name: google_sign_in + sha256: "0b8787cb9c1a68ad398e8010e8c8766bfa33556d2ab97c439fb4137756d7308f" + url: "https://pub.dev" + source: hosted + version: "6.2.1" + 
google_sign_in_android: + dependency: transitive + description: + name: google_sign_in_android + sha256: "0928059d2f0840f63c7b07a30cf73b593ae872cdd0dbd46d1b9ba878d2599c01" + url: "https://pub.dev" + source: hosted + version: "6.1.33" + google_sign_in_ios: + dependency: transitive + description: + name: google_sign_in_ios + sha256: "83f015169102df1ab2905cf8abd8934e28f87db9ace7a5fa676998842fed228a" + url: "https://pub.dev" + source: hosted + version: "5.7.8" + google_sign_in_platform_interface: + dependency: transitive + description: + name: google_sign_in_platform_interface + sha256: "1f6e5787d7a120cc0359ddf315c92309069171306242e181c09472d1b00a2971" + url: "https://pub.dev" + source: hosted + version: "2.4.5" + google_sign_in_web: + dependency: transitive + description: + name: google_sign_in_web + sha256: "042805a21127a85b0dc46bba98a37926f17d2439720e8a459d27045d8ef68055" + url: "https://pub.dev" + source: hosted + version: "0.12.4+2" + graphview: + dependency: "direct main" + description: + name: graphview + sha256: bdba183583b23c30c71edea09ad5f0beef612572d3e39e855467a925bd08392f + url: "https://pub.dev" + source: hosted + version: "1.2.0" + highlight: + dependency: transitive + description: + name: highlight + sha256: "5353a83ffe3e3eca7df0abfb72dcf3fa66cc56b953728e7113ad4ad88497cf21" + url: "https://pub.dev" + source: hosted + version: "0.7.0" + http: + dependency: "direct main" + description: + name: http + sha256: b9c29a161230ee03d3ccf545097fccd9b87a5264228c5d348202e0f0c28f9010 + url: "https://pub.dev" + source: hosted + version: "1.2.2" + http_parser: + dependency: transitive + description: + name: http_parser + sha256: "2aa08ce0341cc9b354a498388e30986515406668dbcc4f7c950c3e715496693b" + url: "https://pub.dev" + source: hosted + version: "4.0.2" + js: + dependency: transitive + description: + name: js + sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 + url: "https://pub.dev" + source: hosted + version: "0.6.7" + leak_tracker: + 
dependency: transitive + description: + name: leak_tracker + sha256: "3f87a60e8c63aecc975dda1ceedbc8f24de75f09e4856ea27daf8958f2f0ce05" + url: "https://pub.dev" + source: hosted + version: "10.0.5" + leak_tracker_flutter_testing: + dependency: transitive + description: + name: leak_tracker_flutter_testing + sha256: "932549fb305594d82d7183ecd9fa93463e9914e1b67cacc34bc40906594a1806" + url: "https://pub.dev" + source: hosted + version: "3.0.5" + leak_tracker_testing: + dependency: transitive + description: + name: leak_tracker_testing + sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" + url: "https://pub.dev" + source: hosted + version: "3.0.1" + lints: + dependency: transitive + description: + name: lints + sha256: "0a217c6c989d21039f1498c3ed9f3ed71b354e69873f13a8dfc3c9fe76f1b452" + url: "https://pub.dev" + source: hosted + version: "2.1.1" + markdown: + dependency: transitive + description: + name: markdown + sha256: ef2a1298144e3f985cc736b22e0ccdaf188b5b3970648f2d9dc13efd1d9df051 + url: "https://pub.dev" + source: hosted + version: "7.2.2" + matcher: + dependency: transitive + description: + name: matcher + sha256: d2323aa2060500f906aa31a895b4030b6da3ebdcc5619d14ce1aada65cd161cb + url: "https://pub.dev" + source: hosted + version: "0.12.16+1" + material_color_utilities: + dependency: transitive + description: + name: material_color_utilities + sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec + url: "https://pub.dev" + source: hosted + version: "0.11.1" + meta: + dependency: transitive + description: + name: meta + sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + url: "https://pub.dev" + source: hosted + version: "1.15.0" + nested: + dependency: transitive + description: + name: nested + sha256: "03bac4c528c64c95c722ec99280375a6f2fc708eec17c7b3f07253b626cd2a20" + url: "https://pub.dev" + source: hosted + version: "1.0.0" + path: + dependency: transitive + description: + name: path + sha256: 
"087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" + url: "https://pub.dev" + source: hosted + version: "1.9.0" + path_provider_linux: + dependency: transitive + description: + name: path_provider_linux + sha256: f7a1fe3a634fe7734c8d3f2766ad746ae2a2884abe22e241a8b301bf5cac3279 + url: "https://pub.dev" + source: hosted + version: "2.2.1" + path_provider_platform_interface: + dependency: transitive + description: + name: path_provider_platform_interface + sha256: "88f5779f72ba699763fa3a3b06aa4bf6de76c8e5de842cf6f29e2e06476c2334" + url: "https://pub.dev" + source: hosted + version: "2.1.2" + path_provider_windows: + dependency: transitive + description: + name: path_provider_windows + sha256: bd6f00dbd873bfb70d0761682da2b3a2c2fccc2b9e84c495821639601d81afe7 + url: "https://pub.dev" + source: hosted + version: "2.3.0" + platform: + dependency: transitive + description: + name: platform + sha256: "5d6b1b0036a5f331ebc77c850ebc8506cbc1e9416c27e59b439f917a902a4984" + url: "https://pub.dev" + source: hosted + version: "3.1.6" + plugin_platform_interface: + dependency: transitive + description: + name: plugin_platform_interface + sha256: "4820fbfdb9478b1ebae27888254d445073732dae3d6ea81f0b7e06d5dedc3f02" + url: "https://pub.dev" + source: hosted + version: "2.1.8" + provider: + dependency: "direct main" + description: + name: provider + sha256: c8a055ee5ce3fd98d6fc872478b03823ffdb448699c6ebdbbc71d59b596fd48c + url: "https://pub.dev" + source: hosted + version: "6.1.2" + shared_preferences: + dependency: "direct main" + description: + name: shared_preferences + sha256: "746e5369a43170c25816cc472ee016d3a66bc13fcf430c0bc41ad7b4b2922051" + url: "https://pub.dev" + source: hosted + version: "2.3.2" + shared_preferences_android: + dependency: transitive + description: + name: shared_preferences_android + sha256: "3b9febd815c9ca29c9e3520d50ec32f49157711e143b7a4ca039eb87e8ade5ab" + url: "https://pub.dev" + source: hosted + version: "2.3.3" + 
shared_preferences_foundation: + dependency: transitive + description: + name: shared_preferences_foundation + sha256: "07e050c7cd39bad516f8d64c455f04508d09df104be326d8c02551590a0d513d" + url: "https://pub.dev" + source: hosted + version: "2.5.3" + shared_preferences_linux: + dependency: transitive + description: + name: shared_preferences_linux + sha256: "580abfd40f415611503cae30adf626e6656dfb2f0cee8f465ece7b6defb40f2f" + url: "https://pub.dev" + source: hosted + version: "2.4.1" + shared_preferences_platform_interface: + dependency: transitive + description: + name: shared_preferences_platform_interface + sha256: "57cbf196c486bc2cf1f02b85784932c6094376284b3ad5779d1b1c6c6a816b80" + url: "https://pub.dev" + source: hosted + version: "2.4.1" + shared_preferences_web: + dependency: transitive + description: + name: shared_preferences_web + sha256: d2ca4132d3946fec2184261726b355836a82c33d7d5b67af32692aff18a4684e + url: "https://pub.dev" + source: hosted + version: "2.4.2" + shared_preferences_windows: + dependency: transitive + description: + name: shared_preferences_windows + sha256: "94ef0f72b2d71bc3e700e025db3710911bd51a71cefb65cc609dd0d9a982e3c1" + url: "https://pub.dev" + source: hosted + version: "2.4.1" + sky_engine: + dependency: transitive + description: flutter + source: sdk + version: "0.0.99" + source_span: + dependency: transitive + description: + name: source_span + sha256: "53e943d4206a5e30df338fd4c6e7a077e02254531b138a15aec3bd143c1a8b3c" + url: "https://pub.dev" + source: hosted + version: "1.10.0" + sprintf: + dependency: transitive + description: + name: sprintf + sha256: "1fc9ffe69d4df602376b52949af107d8f5703b77cda567c4d7d86a0693120f23" + url: "https://pub.dev" + source: hosted + version: "7.0.0" + stack_trace: + dependency: transitive + description: + name: stack_trace + sha256: "73713990125a6d93122541237550ee3352a2d84baad52d375a4cad2eb9b7ce0b" + url: "https://pub.dev" + source: hosted + version: "1.11.1" + stream_channel: + dependency: transitive 
+ description: + name: stream_channel + sha256: ba2aa5d8cc609d96bbb2899c28934f9e1af5cddbd60a827822ea467161eb54e7 + url: "https://pub.dev" + source: hosted + version: "2.1.2" + string_scanner: + dependency: transitive + description: + name: string_scanner + sha256: "556692adab6cfa87322a115640c11f13cb77b3f076ddcc5d6ae3c20242bedcde" + url: "https://pub.dev" + source: hosted + version: "1.2.0" + term_glyph: + dependency: transitive + description: + name: term_glyph + sha256: a29248a84fbb7c79282b40b8c72a1209db169a2e0542bce341da992fe1bc7e84 + url: "https://pub.dev" + source: hosted + version: "1.2.1" + test_api: + dependency: transitive + description: + name: test_api + sha256: "5b8a98dafc4d5c4c9c72d8b31ab2b23fc13422348d2997120294d3bac86b4ddb" + url: "https://pub.dev" + source: hosted + version: "0.7.2" + typed_data: + dependency: transitive + description: + name: typed_data + sha256: f9049c039ebfeb4cf7a7104a675823cd72dba8297f264b6637062516699fa006 + url: "https://pub.dev" + source: hosted + version: "1.4.0" + url_launcher: + dependency: "direct main" + description: + name: url_launcher + sha256: "9d06212b1362abc2f0f0d78e6f09f726608c74e3b9462e8368bb03314aa8d603" + url: "https://pub.dev" + source: hosted + version: "6.3.1" + url_launcher_android: + dependency: transitive + description: + name: url_launcher_android + sha256: "8fc3bae0b68c02c47c5c86fa8bfa74471d42687b0eded01b78de87872db745e2" + url: "https://pub.dev" + source: hosted + version: "6.3.12" + url_launcher_ios: + dependency: transitive + description: + name: url_launcher_ios + sha256: e43b677296fadce447e987a2f519dcf5f6d1e527dc35d01ffab4fff5b8a7063e + url: "https://pub.dev" + source: hosted + version: "6.3.1" + url_launcher_linux: + dependency: transitive + description: + name: url_launcher_linux + sha256: e2b9622b4007f97f504cd64c0128309dfb978ae66adbe944125ed9e1750f06af + url: "https://pub.dev" + source: hosted + version: "3.2.0" + url_launcher_macos: + dependency: transitive + description: + name: 
url_launcher_macos + sha256: "769549c999acdb42b8bcfa7c43d72bf79a382ca7441ab18a808e101149daf672" + url: "https://pub.dev" + source: hosted + version: "3.2.1" + url_launcher_platform_interface: + dependency: transitive + description: + name: url_launcher_platform_interface + sha256: "552f8a1e663569be95a8190206a38187b531910283c3e982193e4f2733f01029" + url: "https://pub.dev" + source: hosted + version: "2.3.2" + url_launcher_web: + dependency: transitive + description: + name: url_launcher_web + sha256: "772638d3b34c779ede05ba3d38af34657a05ac55b06279ea6edd409e323dca8e" + url: "https://pub.dev" + source: hosted + version: "2.3.3" + url_launcher_windows: + dependency: transitive + description: + name: url_launcher_windows + sha256: "49c10f879746271804767cb45551ec5592cdab00ee105c06dddde1a98f73b185" + url: "https://pub.dev" + source: hosted + version: "3.1.2" + uuid: + dependency: "direct main" + description: + name: uuid + sha256: a5be9ef6618a7ac1e964353ef476418026db906c4facdedaa299b7a2e71690ff + url: "https://pub.dev" + source: hosted + version: "4.5.1" + vector_math: + dependency: transitive + description: + name: vector_math + sha256: "80b3257d1492ce4d091729e3a67a60407d227c27241d6927be0130c98e741803" + url: "https://pub.dev" + source: hosted + version: "2.1.4" + vm_service: + dependency: transitive + description: + name: vm_service + sha256: "5c5f338a667b4c644744b661f309fb8080bb94b18a7e91ef1dbd343bed00ed6d" + url: "https://pub.dev" + source: hosted + version: "14.2.5" + web: + dependency: transitive + description: + name: web + sha256: cd3543bd5798f6ad290ea73d210f423502e71900302dde696f8bff84bf89a1cb + url: "https://pub.dev" + source: hosted + version: "1.1.0" + xdg_directories: + dependency: transitive + description: + name: xdg_directories + sha256: "7a3f37b05d989967cdddcbb571f1ea834867ae2faa29725fd085180e0883aa15" + url: "https://pub.dev" + source: hosted + version: "1.1.0" +sdks: + dart: ">=3.5.0 <4.0.0" + flutter: ">=3.24.0" From 
4b38471cb0eca935ce06c417fcb21a00d043962d Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Thu, 17 Oct 2024 09:00:03 -0500 Subject: [PATCH 10/14] Create .aiexclude --- .aiexclude | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 .aiexclude diff --git a/.aiexclude b/.aiexclude new file mode 100644 index 000000000000..e69de29bb2d1 From 3781ca8b554e49078a5cfc1980a41a903b5f0807 Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Thu, 24 Oct 2024 09:08:41 -0500 Subject: [PATCH 11/14] azure --- .flake8 | 12 + .github/workflows/arena-intake.yml | 169 + .github/workflows/autogpt-ci.yml | 296 + .../workflows/autogpt-docker-cache-clean.yml | 59 + .github/workflows/autogpt-docker-ci.yml | 165 + .github/workflows/autogpt-docker-release.yml | 91 + .github/workflows/autogpts-benchmark.yml | 97 + .github/workflows/autogpts-ci.yml | 69 + .github/workflows/benchmark-ci.yml | 141 + .../workflows/benchmark_publish_package.yml | 55 + .github/workflows/close-stale-issues.yml | 34 + .github/workflows/frontend-ci.yml | 60 + .github/workflows/hackathon.yml | 133 + .github/workflows/pr-label.yml | 66 + CLI-USAGE.md | 182 + QUICKSTART.md | 200 + SECURITY.md | 66 + TROUBLESHOOTING.md | 23 + arena/480bot.json | 6 + arena/AGENT_GORDON.json | 6 + arena/AGENT_JARVIS.json | 6 + arena/AI.json | 6 + arena/AKBAgent.json | 7 + arena/ASSISTANT.json | 6 + arena/AUTO_ENGINEER.json | 6 + arena/AUTO_GPT_JON001.json | 6 + arena/Adtractive_Agent.json | 6 + arena/AgGPT.json | 6 + arena/AgentJPark.json | 6 + arena/AgentKD.json | 6 + arena/Ahmad.json | 6 + arena/Alfred.json | 6 + arena/AlphaCISO.json | 6 + arena/AndersLensway.json | 6 + arena/AntlerTestGPT.json | 1 + arena/AppleGPT.json | 6 + arena/AquaAgent.json | 1 + arena/ArtistManagerGPT.json | 6 + arena/AskOpie.json | 6 + arena/Auto.json | 6 + arena/AutoGPT-ariel.json | 6 + arena/AutoGPT2.json | 1 + arena/AutoGenius.json | 6 + arena/AutoTDD.json | 6 + 
arena/AutoTestGenerator.json | 6 + arena/AwareAgent.json | 6 + arena/Bagi_agent.json | 6 + arena/BanglaSgAgent.json | 6 + arena/Baptiste.json | 6 + arena/Bravo06.json | 1 + arena/Brillante-AI.json | 1 + arena/Bunny.json | 6 + arena/CCAgent.json | 6 + arena/CES-GPT.json | 6 + arena/CISLERK.json | 6 + arena/CONNECTBOT.json | 6 + arena/CYNO_AGENT.json | 6 + arena/ChadGPT.json | 1 + arena/ChrisGPT.json | 6 + arena/CodeAutoGPT.json | 6 + arena/CreaitorMarketing.json | 1 + arena/CurieAssistant.json | 6 + arena/DE.json | 6 + arena/DavidsAgent.json | 6 + arena/Derpmaster.json | 6 + arena/DevOpsAgent.json | 6 + arena/Drench.json | 6 + arena/Eduardo.json | 6 + arena/EmbeddedAg.json | 1 + arena/EnglishTestpaperAgent.json | 6 + arena/ExampleAgent.json | 6 + arena/FLASH.json | 6 + arena/FactoryGPT.json | 6 + arena/FcsummerGPT.json | 6 + arena/FynAgent.json | 6 + arena/GG.json | 6 + arena/GPTTest.json | 6 + arena/GameSoundGPT.json | 6 + arena/GeorgeGPT.json | 6 + arena/Granger.json | 6 + arena/HACKATHON.json | 6 + arena/HMD2.json | 6 + arena/Heisenberg.json | 6 + arena/HekolcuAutoGPT.json | 6 + arena/HuitzilAiAgent.json | 6 + arena/Hypeman.json | 6 + arena/IncredibubbleTea.json | 6 + arena/JackGPT.json | 6 + arena/Jarvis.json | 6 + arena/JarvisAgent.json | 6 + arena/Jean-Michel.json | 6 + arena/Job_GPT.json | 6 + arena/JoshAgent1.json | 6 + arena/KnowledgeExtractor.json | 6 + arena/LAWYER_EMAD.json | 6 + arena/LHRobot.json | 6 + arena/Lab49Agent.json | 6 + arena/LbAgent.json | 6 + arena/LegalAgent.json | 6 + arena/Light_Agent.json | 6 + arena/LinuzGPT.json | 1 + arena/Lirum.json | 6 + arena/MANU.json | 6 + arena/MEGATRON.json | 6 + arena/MOBILE.json | 6 + arena/Maharathi.json | 1 + arena/MangoAI.json | 6 + arena/MangoAgent-3.json | 6 + arena/MangoAgent-4.json | 6 + arena/MarketResearcherEduRob.json | 6 + arena/Marx.json | 6 + arena/Mary.json | 6 + arena/Melang.json | 6 + arena/Miao.json | 6 + arena/MindwareGPT.json | 6 + arena/Mira.json | 6 + arena/MoTS.json | 6 + 
arena/MojoBurrito.json | 6 + arena/MyAgent.json | 6 + arena/MyExample.json | 6 + arena/MyExampleAgent.json | 6 + arena/MyFirstAgent.json | 6 + arena/MyFistAgent.json | 6 + arena/MyTestAgent.json | 6 + arena/N.json | 6 + arena/NASAssistant2.json | 6 + arena/NHAN_BOT.json | 6 + arena/NadeemAgent.json | 6 + arena/NanAutoGPT.json | 6 + arena/NoobSupreme.json | 6 + arena/NumberOne.json | 6 + arena/Orange.json | 6 + arena/PAgentAI.json | 6 + arena/Pacific.json | 6 + arena/ParalegalAgent.json | 6 + arena/Pelle.json | 6 + arena/Portalen.json | 1 + arena/Pumu2_agent.json | 6 + arena/Q.json | 6 + arena/QA_AGENT.json | 6 + arena/QuantumQuill.json | 6 + arena/RAGOptimizer.json | 6 + arena/RFPScanner.json | 6 + arena/RONNIN.json | 6 + arena/RagsToRiches.json | 6 + arena/RandomVampirePictureBot.json | 6 + arena/Raslebot.json | 6 + arena/ResearchAgent.json | 1 + arena/RosterAgent.json | 6 + arena/SaasWebDev.json | 6 + arena/SaveAsPDF2.json | 6 + arena/ShiviBot.json | 6 + arena/SkorkobaniecAgent.json | 6 + arena/SmartAgent.json | 6 + arena/SmartGPT.json | 6 + arena/SouAgent.json | 6 + arena/Stragegy_Steve.json | 6 + arena/Susan.json | 6 + arena/TEST_TPK.json | 6 + arena/TLGPT.json | 6 + arena/TMarafon.json | 6 + arena/TRAVIS.json | 6 + arena/TeslaBot.json | 6 + arena/Tessa_AutoGPT_agent.json | 6 + arena/TestLbAgent.json | 6 + arena/TheAgency.json | 1 + arena/TheAgent.json | 6 + arena/TraceLLMAgent.json | 6 + arena/UGYUJI.json | 6 + arena/UTC-Crew.json | 6 + arena/UmaruAgent.json | 6 + arena/UniAgent.json | 6 + arena/Verkiezingsprogrammas.json | 6 + arena/WRITER.json | 6 + arena/WYC.json | 6 + arena/WarlockAgent.json | 6 + arena/WeatherInformer.json | 6 + arena/WiseAgent.json | 1 + arena/XXY.json | 6 + arena/YOU.json | 6 + arena/YoudaoAutoGPT.json | 6 + arena/YoutubePost_agent.json | 6 + arena/Yui3.json | 6 + arena/Yutan_agent.json | 6 + arena/ZJgpt.json | 6 + arena/Zeus.json | 6 + arena/ZhaoJianAutoGPT.json | 6 + arena/ZoeyGPT.json | 6 + arena/Zoidberg.json | 6 + arena/aWOL.json | 
6 + arena/a_reverent_heart.json | 6 + arena/accidental-agent.json | 6 + arena/actor_tester.json | 6 + arena/admariner.json | 1 + arena/ag1.json | 6 + arena/agent2.json | 6 + arena/agentSmith.json | 6 + arena/agent_2.json | 6 + arena/agentgpt.json | 6 + arena/agsCehAgent.json | 6 + arena/ai_assistant.json | 6 + arena/aiaudit.json | 6 + arena/aiwowo.json | 6 + arena/aixiaoxin.json | 6 + arena/akela.json | 6 + arena/analystgpt.json | 6 + arena/arbetsformedlingen.json | 6 + arena/assistant1.json | 6 + arena/autoai.json | 6 + arena/autocoder.json | 6 + arena/autogbd.json | 6 + arena/autogpt-hackathon2.json | 1 + arena/autogpt.json | 6 + arena/autogpt_hackathon.json | 1 + arena/autogpt_hackathon1.json | 1 + arena/autogpt_warlock.json | 6 + arena/autogptagent.json | 6 + arena/avengaGPT.json | 6 + arena/babe_perphorator_.json | 6 + arena/baby_agent.json | 6 + arena/bait.json | 6 + arena/beyond.json | 6 + arena/bigman.json | 6 + arena/billy.json | 6 + arena/bingoTesting.json | 6 + arena/bosaeed_agent.json | 6 + arena/bot.json | 6 + arena/bot01.json | 6 + arena/buddy.json | 6 + arena/burt.json | 6 + arena/business.json | 6 + arena/byl.json | 6 + arena/career-agent.json | 1 + arena/caud.json | 6 + arena/ccace.json | 6 + arena/chappigpt.json | 6 + arena/chappyAi.json | 6 + arena/chatgpt_taller.json | 6 + arena/chengshu.json | 6 + arena/chenzo.json | 6 + arena/cislerk2.json | 6 + arena/codebutler.json | 1 + arena/coder_first.json | 6 + arena/contentstrategy.json | 1 + arena/cssupdater.json | 6 + arena/da-agent.json | 6 + arena/date-buffer.json | 6 + arena/davidtest1.json | 6 + arena/davidtestagent.json | 6 + arena/dda.json | 6 + arena/decision-maker.json | 6 + arena/dev_agent.json | 6 + arena/devagent.json | 6 + arena/dive2code.json | 1 + arena/dndagent.json | 6 + arena/dy_agent.json | 6 + arena/dy_agent2.json | 6 + arena/easn.json | 6 + arena/eddy.json | 6 + arena/ekc911_agent.json | 6 + arena/engineer.json | 6 + arena/evlyn.json | 6 + arena/evo-ninja.json | 6 + arena/evo.json 
| 6 + arena/faran.json | 6 + arena/first-agent.json | 6 + arena/foobar.json | 6 + arena/frankgarcia.json | 6 + arena/fritzgpt.json | 1 + arena/fst.json | 6 + arena/fuzz_gen.json | 6 + arena/gaby_agent.json | 6 + arena/gen_fuzz.json | 6 + arena/ghostcoder.json | 1 + arena/gipity.json | 6 + arena/gpt-dev-engineer-agent.json | 1 + arena/gpt-eng-forge.json | 6 + arena/gpt-engineer.json | 1 + arena/gpt_for_beans.json | 6 + arena/hall_oto.json | 6 + arena/han.json | 6 + arena/happy_guy.json | 6 + arena/hello.json | 6 + arena/hodri.json | 6 + arena/houxe.json | 6 + arena/icode.json | 6 + arena/iku2.json | 6 + arena/illynet.json | 6 + arena/illynetV2.json | 6 + arena/illyx1.json | 6 + arena/info-retrieval.json | 6 + arena/ivangpt_agent.json | 6 + arena/jarvis2.json | 6 + arena/jarvis3.json | 6 + arena/jaxbob1.json | 6 + arena/job_apply.json | 6 + arena/jonesyboi.json | 6 + arena/justwondering.json | 1 + arena/kingmitch.json | 6 + arena/lawk.json | 6 + arena/lcdegpt.json | 6 + arena/letst.json | 6 + arena/letstest.json | 6 + arena/lilAgent.json | 6 + arena/linggong.json | 6 + arena/liuzh.json | 6 + arena/ltzAgent.json | 6 + arena/martingpt.json | 6 + arena/medical-agent.json | 6 + arena/metware.json | 6 + arena/miniAgent.json | 6 + arena/minister_agent.json | 6 + arena/misslu.json | 6 + arena/mljar-agent.json | 1 + arena/momo.json | 1 + arena/monthly_summary.json | 6 + arena/mrSabelotodo.json | 6 + arena/myGPT.json | 6 + arena/my_AutoGPT.json | 6 + arena/my_fx_agent.json | 6 + arena/my_gpt.json | 6 + arena/mygent.json | 6 + arena/nawalj.json | 6 + arena/newAgent.json | 6 + arena/northfork.json | 6 + arena/od_agent_1.json | 6 + arena/operationAgent.json | 6 + arena/personal-al-website.json | 6 + arena/piGPT.json | 6 + arena/pipeline.json | 6 + arena/podcast_agent.json | 6 + arena/potato.json | 1 + arena/project_assitant.json | 6 + arena/project_master.json | 6 + arena/project_review.json | 6 + arena/prometheus.json | 6 + arena/proudgpt.json | 6 + arena/qinghu3.json | 6 + 
arena/ra.json | 6 + arena/ra1.json | 6 + arena/rachael.json | 6 + arena/raindrop.json | 6 + arena/researchGPT.json | 6 + arena/researchGPT2.json | 6 + arena/research_analyst.json | 6 + arena/robita.json | 6 + arena/robot.json | 6 + arena/searchagent.json | 6 + arena/set.json | 6 + arena/sgpt.json | 6 + arena/shivi.json | 6 + arena/sky.json | 6 + arena/smith.json | 6 + arena/songyalei.json | 6 + arena/sql.json | 6 + arena/stefan.json | 6 + arena/stockAgent.json | 6 + arena/swarms.json | 1 + arena/tdev.json | 6 + arena/teacher.json | 6 + arena/test-tpk.json | 6 + arena/test.json | 6 + arena/test1.json | 6 + arena/testGPT.json | 6 + arena/thebestagent.json | 1 + arena/theone.json | 1 + arena/tiffGPT.json | 6 + arena/trend_agent.json | 6 + arena/umiuni_agent.json | 6 + arena/uply.json | 1 + arena/url-to-lead.json | 6 + arena/v-gpt.json | 6 + arena/victor2-0.json | 6 + arena/web_developer.json | 6 + arena/webagent.json | 6 + arena/webgeek.json | 6 + arena/wedding-planner.json | 6 + arena/woohoo_agent.json | 6 + arena/wyjagent.json | 6 + arena/xmly.json | 6 + arena/xq_agent.json | 6 + arena/xt0m-GPT.json | 6 + arena/xtest.json | 6 + arena/yarbis.json | 6 + arena/zaheer.json | 6 + arena/zcb.json | 6 + arena/zczc.json | 6 + arena/zhizhi.json | 6 + arena/zlipknot_1.json | 6 + arena/zlipknot_test_agent_4.json | 6 + arena/zze.json | 6 + .../backend/server/routers/integrations.py | 236 + .../frontend/src/components/ui/use-toast.ts | 191 + autogpts/autogpt/.coveragerc | 2 + autogpts/autogpt/.devcontainer/Dockerfile | 13 + .../autogpt/.devcontainer/devcontainer.json | 56 + .../autogpt/.devcontainer/docker-compose.yml | 12 + autogpts/autogpt/.dockerignore | 14 + autogpts/autogpt/.env.template | 239 + autogpts/autogpt/.envrc | 4 + autogpts/autogpt/.flake8 | 11 + autogpts/autogpt/.gitattributes | 5 + autogpts/autogpt/.gitignore | 169 + autogpts/autogpt/.pre-commit-config.yaml | 42 + autogpts/autogpt/.sourcery.yaml | 71 + autogpts/autogpt/BULLETIN.md | 13 + 
autogpts/autogpt/Dockerfile | 56 + autogpts/autogpt/README.md | 180 + .../autogpt/agbenchmark_config/.gitignore | 3 + .../autogpt/agbenchmark_config/__init__.py | 0 .../agbenchmark_config/analyze_reports.py | 143 + .../autogpt/agbenchmark_config/benchmarks.py | 85 + .../autogpt/agbenchmark_config/config.json | 8 + autogpts/autogpt/autogpt.bat | 27 + autogpts/autogpt/autogpt.sh | 29 + autogpts/autogpt/autogpt/__init__.py | 7 + autogpts/autogpt/autogpt/__main__.py | 5 + .../autogpt/agent_factory/configurators.py | 124 + .../autogpt/agent_factory/generators.py | 38 + .../agent_factory/profile_generator.py | 248 + .../autogpt/autogpt/agent_manager/__init__.py | 3 + .../autogpt/agent_manager/agent_manager.py | 45 + autogpts/autogpt/autogpt/agents/__init__.py | 4 + autogpts/autogpt/autogpt/agents/agent.py | 320 + autogpts/autogpt/autogpt/agents/base.py | 396 + .../agents/features/agent_file_manager.py | 102 + .../autogpt/agents/features/context.py | 82 + .../autogpt/agents/features/watchdog.py | 76 + .../agents/prompt_strategies/one_shot.py | 475 ++ .../autogpt/agents/utils/exceptions.py | 64 + .../autogpt/agents/utils/prompt_scratchpad.py | 108 + autogpts/autogpt/autogpt/app/__init__.py | 6 + .../autogpt/app/agent_protocol_server.py | 492 ++ autogpts/autogpt/autogpt/app/cli.py | 287 + autogpts/autogpt/autogpt/app/configurator.py | 165 + autogpts/autogpt/autogpt/app/main.py | 810 ++ autogpts/autogpt/autogpt/app/setup.py | 207 + autogpts/autogpt/autogpt/app/spinner.py | 70 + autogpts/autogpt/autogpt/app/telemetry.py | 64 + autogpts/autogpt/autogpt/app/utils.py | 280 + autogpts/autogpt/autogpt/command_decorator.py | 70 + autogpts/autogpt/autogpt/commands/__init__.py | 9 + .../autogpt/autogpt/commands/decorators.py | 82 + .../autogpt/autogpt/commands/execute_code.py | 387 + .../autogpt/autogpt/commands/file_context.py | 131 + .../autogpt/commands/file_operations.py | 241 + .../autogpt/commands/file_operations_utils.py | 151 + .../autogpt/commands/git_operations.py | 58 + 
.../autogpt/autogpt/commands/image_gen.py | 212 + autogpts/autogpt/autogpt/commands/system.py | 69 + autogpts/autogpt/autogpt/commands/times.py | 10 + .../autogpt/commands/user_interaction.py | 32 + .../autogpt/autogpt/commands/web_search.py | 169 + .../autogpt/autogpt/commands/web_selenium.py | 379 + autogpts/autogpt/autogpt/config/__init__.py | 14 + .../autogpt/autogpt/config/ai_directives.py | 48 + autogpts/autogpt/autogpt/config/ai_profile.py | 68 + autogpts/autogpt/autogpt/config/config.py | 343 + .../autogpt/core/ARCHITECTURE_NOTES.md | 271 + autogpts/autogpt/autogpt/core/README.md | 92 + autogpts/autogpt/autogpt/core/__init__.py | 0 .../autogpt/autogpt/core/ability/__init__.py | 18 + autogpts/autogpt/autogpt/core/ability/base.py | 88 + .../autogpt/core/ability/builtins/__init__.py | 12 + .../ability/builtins/create_new_ability.py | 107 + .../core/ability/builtins/file_operations.py | 170 + .../ability/builtins/query_language_model.py | 66 + .../autogpt/autogpt/core/ability/schema.py | 30 + .../autogpt/autogpt/core/ability/simple.py | 97 + .../autogpt/autogpt/core/agent/__init__.py | 9 + autogpts/autogpt/autogpt/core/agent/base.py | 26 + autogpts/autogpt/autogpt/core/agent/simple.py | 404 + .../autogpt/core/configuration/__init__.py | 14 + .../autogpt/core/configuration/schema.py | 351 + .../autogpt/autogpt/core/memory/__init__.py | 9 + autogpts/autogpt/autogpt/core/memory/base.py | 13 + .../autogpt/autogpt/core/memory/simple.py | 47 + .../autogpt/autogpt/core/planning/__init__.py | 11 + .../autogpt/autogpt/core/planning/base.py | 54 + .../planning/prompt_strategies/__init__.py | 12 + .../prompt_strategies/initial_plan.py | 204 + .../prompt_strategies/name_and_goals.py | 147 + .../prompt_strategies/next_ability.py | 201 + .../autogpt/autogpt/core/planning/schema.py | 48 + .../autogpt/autogpt/core/planning/simple.py | 188 + .../autogpt/core/planning/templates.py | 84 + .../autogpt/autogpt/core/plugin/__init__.py | 6 + 
autogpts/autogpt/autogpt/core/plugin/base.py | 162 + .../autogpt/autogpt/core/plugin/simple.py | 75 + autogpts/autogpt/autogpt/core/poetry.lock | 1345 +++ .../autogpt/core/prompting/__init__.py | 8 + .../autogpt/autogpt/core/prompting/base.py | 23 + .../autogpt/autogpt/core/prompting/schema.py | 34 + .../autogpt/autogpt/core/prompting/utils.py | 9 + autogpts/autogpt/autogpt/core/pyproject.toml | 77 + .../autogpt/autogpt/core/resource/__init__.py | 15 + .../core/resource/model_providers/__init__.py | 65 + .../core/resource/model_providers/openai.py | 879 ++ .../core/resource/model_providers/schema.py | 359 + .../autogpt/autogpt/core/resource/schema.py | 76 + .../autogpt/autogpt/core/runner/__init__.py | 3 + .../autogpt/core/runner/cli_app/__init__.py | 0 .../autogpt/core/runner/cli_app/cli.py | 47 + .../autogpt/core/runner/cli_app/main.py | 74 + .../core/runner/cli_web_app/__init__.py | 0 .../autogpt/core/runner/cli_web_app/cli.py | 58 + .../runner/cli_web_app/server/__init__.py | 0 .../core/runner/cli_web_app/server/api.py | 99 + .../core/runner/client_lib/__init__.py | 0 .../runner/client_lib/logging/__init__.py | 22 + .../core/runner/client_lib/logging/config.py | 82 + .../core/runner/client_lib/logging/helpers.py | 23 + .../autogpt/core/runner/client_lib/parser.py | 45 + .../core/runner/client_lib/settings.py | 14 + .../client_lib/shared_click_commands.py | 19 + .../autogpt/core/runner/client_lib/utils.py | 62 + .../autogpt/autogpt/core/utils/json_schema.py | 142 + .../autogpt/autogpt/core/utils/json_utils.py | 93 + .../autogpt/core/workspace/__init__.py | 9 + .../autogpt/autogpt/core/workspace/base.py | 70 + .../autogpt/autogpt/core/workspace/simple.py | 194 + .../autogpt/autogpt/file_storage/__init__.py | 44 + autogpts/autogpt/autogpt/file_storage/base.py | 204 + autogpts/autogpt/autogpt/file_storage/gcs.py | 213 + .../autogpt/autogpt/file_storage/local.py | 139 + autogpts/autogpt/autogpt/file_storage/s3.py | 265 + .../autogpt/autogpt/llm/providers/__init__.py 
| 0 .../autogpt/autogpt/llm/providers/openai.py | 28 + autogpts/autogpt/autogpt/logs/__init__.py | 26 + autogpts/autogpt/autogpt/logs/config.py | 240 + autogpts/autogpt/autogpt/logs/filters.py | 12 + autogpts/autogpt/autogpt/logs/formatters.py | 53 + autogpts/autogpt/autogpt/logs/handlers.py | 81 + autogpts/autogpt/autogpt/logs/helpers.py | 70 + autogpts/autogpt/autogpt/logs/log_cycle.py | 81 + autogpts/autogpt/autogpt/logs/utils.py | 9 + .../autogpt/autogpt/memory/vector/__init__.py | 156 + .../autogpt/memory/vector/memory_item.py | 285 + .../memory/vector/providers/__init__.py | 7 + .../autogpt/memory/vector/providers/base.py | 79 + .../memory/vector/providers/json_file.py | 92 + .../memory/vector/providers/no_memory.py | 36 + .../autogpt/autogpt/memory/vector/utils.py | 98 + autogpts/autogpt/autogpt/models/__init__.py | 0 .../autogpt/autogpt/models/action_history.py | 234 + .../autogpt/models/base_open_ai_plugin.py | 251 + autogpts/autogpt/autogpt/models/command.py | 72 + .../autogpt/models/command_parameter.py | 17 + .../autogpt/models/command_registry.py | 212 + .../autogpt/autogpt/models/context_item.py | 95 + autogpts/autogpt/autogpt/plugins/__init__.py | 330 + .../autogpt/autogpt/plugins/plugin_config.py | 11 + .../autogpt/autogpt/plugins/plugins_config.py | 118 + .../autogpt/autogpt/processing/__init__.py | 0 autogpts/autogpt/autogpt/processing/html.py | 33 + autogpts/autogpt/autogpt/processing/text.py | 318 + autogpts/autogpt/autogpt/prompts/__init__.py | 0 autogpts/autogpt/autogpt/prompts/prompt.py | 5 + autogpts/autogpt/autogpt/prompts/utils.py | 11 + autogpts/autogpt/autogpt/singleton.py | 16 + autogpts/autogpt/autogpt/speech/__init__.py | 4 + autogpts/autogpt/autogpt/speech/base.py | 54 + .../autogpt/autogpt/speech/eleven_labs.py | 93 + autogpts/autogpt/autogpt/speech/gtts.py | 24 + autogpts/autogpt/autogpt/speech/macos_tts.py | 23 + autogpts/autogpt/autogpt/speech/say.py | 79 + .../autogpt/speech/stream_elements_speech.py | 54 + 
.../autogpt/autogpt/url_utils/__init__.py | 0 .../autogpt/autogpt/url_utils/validators.py | 92 + autogpts/autogpt/autogpt/utils.py | 19 + autogpts/autogpt/azure.yaml.template | 7 + .../autogpt/challenges_already_beaten.json | 3 + autogpts/autogpt/codecov.yml | 18 + autogpts/autogpt/data/.keep | 0 autogpts/autogpt/docker-compose.yml | 49 + autogpts/autogpt/hooks/post-checkout | 2 + autogpts/autogpt/hooks/post-rewrite | 4 + autogpts/autogpt/plugin.png | Bin 0 -> 33356 bytes autogpts/autogpt/plugins/.keep | 0 autogpts/autogpt/poetry.lock | 7266 +++++++++++++++++ autogpts/autogpt/prompt_settings.yaml | 17 + autogpts/autogpt/pyproject.toml | 163 + autogpts/autogpt/run | 10 + autogpts/autogpt/run_benchmark | 9 + autogpts/autogpt/scripts/__init__.py | 0 .../autogpt/scripts/check_requirements.py | 38 + .../autogpt/scripts/install_plugin_deps.py | 66 + autogpts/autogpt/setup | 8 + autogpts/autogpt/tests/__init__.py | 0 autogpts/autogpt/tests/conftest.py | 147 + autogpts/autogpt/tests/context.py | 7 + .../autogpt/tests/integration/__init__.py | 0 .../tests/integration/agent_factory.py | 56 + .../tests/integration/memory/__init__.py | 0 .../memory/_test_json_file_memory.py | 126 + .../tests/integration/memory/conftest.py | 17 + .../autogpt/tests/integration/memory/utils.py | 44 + .../tests/integration/test_execute_code.py | 127 + .../tests/integration/test_image_gen.py | 235 + .../autogpt/tests/integration/test_setup.py | 70 + .../tests/integration/test_web_selenium.py | 18 + autogpts/autogpt/tests/mocks/__init__.py | 0 autogpts/autogpt/tests/mocks/mock_commands.py | 29 + autogpts/autogpt/tests/unit/__init__.py | 0 .../tests/unit/data/test_ai_config.yaml | 5 + .../Auto-GPT-Plugin-Test-master.zip | Bin 0 -> 15385 bytes .../test_plugins/auto_gpt_guanaco/__init__.py | 274 + .../unit/models/test_base_open_api_plugin.py | 81 + .../autogpt/tests/unit/test_ai_profile.py | 71 + autogpts/autogpt/tests/unit/test_commands.py | 239 + autogpts/autogpt/tests/unit/test_config.py | 185 + 
.../tests/unit/test_file_operations.py | 254 + .../tests/unit/test_gcs_file_storage.py | 200 + .../autogpt/tests/unit/test_git_commands.py | 44 + .../autogpt/tests/unit/test_json_utils.py | 93 + .../tests/unit/test_local_file_storage.py | 211 + autogpts/autogpt/tests/unit/test_logs.py | 36 + autogpts/autogpt/tests/unit/test_plugins.py | 125 + .../autogpt/tests/unit/test_prompt_config.py | 42 + .../tests/unit/test_s3_file_storage.py | 195 + autogpts/autogpt/tests/unit/test_spinner.py | 35 + .../tests/unit/test_text_file_parsers.py | 170 + .../autogpt/tests/unit/test_url_validation.py | 157 + autogpts/autogpt/tests/unit/test_utils.py | 332 + .../autogpt/tests/unit/test_web_search.py | 136 + autogpts/autogpt/tests/utils.py | 10 + autogpts/autogpt/tests/vcr/__init__.py | 77 + autogpts/autogpt/tests/vcr/vcr_filter.py | 110 + 628 files changed, 35510 insertions(+) create mode 100644 .flake8 create mode 100644 .github/workflows/arena-intake.yml create mode 100644 .github/workflows/autogpt-ci.yml create mode 100644 .github/workflows/autogpt-docker-cache-clean.yml create mode 100644 .github/workflows/autogpt-docker-ci.yml create mode 100644 .github/workflows/autogpt-docker-release.yml create mode 100644 .github/workflows/autogpts-benchmark.yml create mode 100644 .github/workflows/autogpts-ci.yml create mode 100644 .github/workflows/benchmark-ci.yml create mode 100644 .github/workflows/benchmark_publish_package.yml create mode 100644 .github/workflows/close-stale-issues.yml create mode 100644 .github/workflows/frontend-ci.yml create mode 100644 .github/workflows/hackathon.yml create mode 100644 .github/workflows/pr-label.yml create mode 100644 CLI-USAGE.md create mode 100644 QUICKSTART.md create mode 100644 SECURITY.md create mode 100644 TROUBLESHOOTING.md create mode 100644 arena/480bot.json create mode 100644 arena/AGENT_GORDON.json create mode 100644 arena/AGENT_JARVIS.json create mode 100644 arena/AI.json create mode 100644 arena/AKBAgent.json create mode 100644 
arena/ASSISTANT.json create mode 100644 arena/AUTO_ENGINEER.json create mode 100644 arena/AUTO_GPT_JON001.json create mode 100644 arena/Adtractive_Agent.json create mode 100644 arena/AgGPT.json create mode 100644 arena/AgentJPark.json create mode 100644 arena/AgentKD.json create mode 100644 arena/Ahmad.json create mode 100644 arena/Alfred.json create mode 100644 arena/AlphaCISO.json create mode 100644 arena/AndersLensway.json create mode 100644 arena/AntlerTestGPT.json create mode 100644 arena/AppleGPT.json create mode 100644 arena/AquaAgent.json create mode 100644 arena/ArtistManagerGPT.json create mode 100644 arena/AskOpie.json create mode 100644 arena/Auto.json create mode 100644 arena/AutoGPT-ariel.json create mode 100644 arena/AutoGPT2.json create mode 100644 arena/AutoGenius.json create mode 100644 arena/AutoTDD.json create mode 100644 arena/AutoTestGenerator.json create mode 100644 arena/AwareAgent.json create mode 100644 arena/Bagi_agent.json create mode 100644 arena/BanglaSgAgent.json create mode 100644 arena/Baptiste.json create mode 100644 arena/Bravo06.json create mode 100644 arena/Brillante-AI.json create mode 100644 arena/Bunny.json create mode 100644 arena/CCAgent.json create mode 100644 arena/CES-GPT.json create mode 100644 arena/CISLERK.json create mode 100644 arena/CONNECTBOT.json create mode 100644 arena/CYNO_AGENT.json create mode 100644 arena/ChadGPT.json create mode 100644 arena/ChrisGPT.json create mode 100644 arena/CodeAutoGPT.json create mode 100644 arena/CreaitorMarketing.json create mode 100644 arena/CurieAssistant.json create mode 100644 arena/DE.json create mode 100644 arena/DavidsAgent.json create mode 100644 arena/Derpmaster.json create mode 100644 arena/DevOpsAgent.json create mode 100644 arena/Drench.json create mode 100644 arena/Eduardo.json create mode 100644 arena/EmbeddedAg.json create mode 100644 arena/EnglishTestpaperAgent.json create mode 100644 arena/ExampleAgent.json create mode 100644 arena/FLASH.json create mode 100644 
arena/FactoryGPT.json create mode 100644 arena/FcsummerGPT.json create mode 100644 arena/FynAgent.json create mode 100644 arena/GG.json create mode 100644 arena/GPTTest.json create mode 100644 arena/GameSoundGPT.json create mode 100644 arena/GeorgeGPT.json create mode 100644 arena/Granger.json create mode 100644 arena/HACKATHON.json create mode 100644 arena/HMD2.json create mode 100644 arena/Heisenberg.json create mode 100644 arena/HekolcuAutoGPT.json create mode 100644 arena/HuitzilAiAgent.json create mode 100644 arena/Hypeman.json create mode 100644 arena/IncredibubbleTea.json create mode 100644 arena/JackGPT.json create mode 100644 arena/Jarvis.json create mode 100644 arena/JarvisAgent.json create mode 100644 arena/Jean-Michel.json create mode 100644 arena/Job_GPT.json create mode 100644 arena/JoshAgent1.json create mode 100644 arena/KnowledgeExtractor.json create mode 100644 arena/LAWYER_EMAD.json create mode 100644 arena/LHRobot.json create mode 100644 arena/Lab49Agent.json create mode 100644 arena/LbAgent.json create mode 100644 arena/LegalAgent.json create mode 100644 arena/Light_Agent.json create mode 100644 arena/LinuzGPT.json create mode 100644 arena/Lirum.json create mode 100644 arena/MANU.json create mode 100644 arena/MEGATRON.json create mode 100644 arena/MOBILE.json create mode 100644 arena/Maharathi.json create mode 100644 arena/MangoAI.json create mode 100644 arena/MangoAgent-3.json create mode 100644 arena/MangoAgent-4.json create mode 100644 arena/MarketResearcherEduRob.json create mode 100644 arena/Marx.json create mode 100644 arena/Mary.json create mode 100644 arena/Melang.json create mode 100644 arena/Miao.json create mode 100644 arena/MindwareGPT.json create mode 100644 arena/Mira.json create mode 100644 arena/MoTS.json create mode 100644 arena/MojoBurrito.json create mode 100644 arena/MyAgent.json create mode 100644 arena/MyExample.json create mode 100644 arena/MyExampleAgent.json create mode 100644 arena/MyFirstAgent.json create mode 100644 
arena/MyFistAgent.json create mode 100644 arena/MyTestAgent.json create mode 100644 arena/N.json create mode 100644 arena/NASAssistant2.json create mode 100644 arena/NHAN_BOT.json create mode 100644 arena/NadeemAgent.json create mode 100644 arena/NanAutoGPT.json create mode 100644 arena/NoobSupreme.json create mode 100644 arena/NumberOne.json create mode 100644 arena/Orange.json create mode 100644 arena/PAgentAI.json create mode 100644 arena/Pacific.json create mode 100644 arena/ParalegalAgent.json create mode 100644 arena/Pelle.json create mode 100644 arena/Portalen.json create mode 100644 arena/Pumu2_agent.json create mode 100644 arena/Q.json create mode 100644 arena/QA_AGENT.json create mode 100644 arena/QuantumQuill.json create mode 100644 arena/RAGOptimizer.json create mode 100644 arena/RFPScanner.json create mode 100644 arena/RONNIN.json create mode 100644 arena/RagsToRiches.json create mode 100644 arena/RandomVampirePictureBot.json create mode 100644 arena/Raslebot.json create mode 100644 arena/ResearchAgent.json create mode 100644 arena/RosterAgent.json create mode 100644 arena/SaasWebDev.json create mode 100644 arena/SaveAsPDF2.json create mode 100644 arena/ShiviBot.json create mode 100644 arena/SkorkobaniecAgent.json create mode 100644 arena/SmartAgent.json create mode 100644 arena/SmartGPT.json create mode 100644 arena/SouAgent.json create mode 100644 arena/Stragegy_Steve.json create mode 100644 arena/Susan.json create mode 100644 arena/TEST_TPK.json create mode 100644 arena/TLGPT.json create mode 100644 arena/TMarafon.json create mode 100644 arena/TRAVIS.json create mode 100644 arena/TeslaBot.json create mode 100644 arena/Tessa_AutoGPT_agent.json create mode 100644 arena/TestLbAgent.json create mode 100644 arena/TheAgency.json create mode 100644 arena/TheAgent.json create mode 100644 arena/TraceLLMAgent.json create mode 100644 arena/UGYUJI.json create mode 100644 arena/UTC-Crew.json create mode 100644 arena/UmaruAgent.json create mode 100644 
arena/UniAgent.json create mode 100644 arena/Verkiezingsprogrammas.json create mode 100644 arena/WRITER.json create mode 100644 arena/WYC.json create mode 100644 arena/WarlockAgent.json create mode 100644 arena/WeatherInformer.json create mode 100644 arena/WiseAgent.json create mode 100644 arena/XXY.json create mode 100644 arena/YOU.json create mode 100644 arena/YoudaoAutoGPT.json create mode 100644 arena/YoutubePost_agent.json create mode 100644 arena/Yui3.json create mode 100644 arena/Yutan_agent.json create mode 100644 arena/ZJgpt.json create mode 100644 arena/Zeus.json create mode 100644 arena/ZhaoJianAutoGPT.json create mode 100644 arena/ZoeyGPT.json create mode 100644 arena/Zoidberg.json create mode 100644 arena/aWOL.json create mode 100644 arena/a_reverent_heart.json create mode 100644 arena/accidental-agent.json create mode 100644 arena/actor_tester.json create mode 100644 arena/admariner.json create mode 100644 arena/ag1.json create mode 100644 arena/agent2.json create mode 100644 arena/agentSmith.json create mode 100644 arena/agent_2.json create mode 100644 arena/agentgpt.json create mode 100644 arena/agsCehAgent.json create mode 100644 arena/ai_assistant.json create mode 100644 arena/aiaudit.json create mode 100644 arena/aiwowo.json create mode 100644 arena/aixiaoxin.json create mode 100644 arena/akela.json create mode 100644 arena/analystgpt.json create mode 100644 arena/arbetsformedlingen.json create mode 100644 arena/assistant1.json create mode 100644 arena/autoai.json create mode 100644 arena/autocoder.json create mode 100644 arena/autogbd.json create mode 100644 arena/autogpt-hackathon2.json create mode 100644 arena/autogpt.json create mode 100644 arena/autogpt_hackathon.json create mode 100644 arena/autogpt_hackathon1.json create mode 100644 arena/autogpt_warlock.json create mode 100644 arena/autogptagent.json create mode 100644 arena/avengaGPT.json create mode 100644 arena/babe_perphorator_.json create mode 100644 arena/baby_agent.json create mode 
100644 arena/bait.json create mode 100644 arena/beyond.json create mode 100644 arena/bigman.json create mode 100644 arena/billy.json create mode 100644 arena/bingoTesting.json create mode 100644 arena/bosaeed_agent.json create mode 100644 arena/bot.json create mode 100644 arena/bot01.json create mode 100644 arena/buddy.json create mode 100644 arena/burt.json create mode 100644 arena/business.json create mode 100644 arena/byl.json create mode 100644 arena/career-agent.json create mode 100644 arena/caud.json create mode 100644 arena/ccace.json create mode 100644 arena/chappigpt.json create mode 100644 arena/chappyAi.json create mode 100644 arena/chatgpt_taller.json create mode 100644 arena/chengshu.json create mode 100644 arena/chenzo.json create mode 100644 arena/cislerk2.json create mode 100644 arena/codebutler.json create mode 100644 arena/coder_first.json create mode 100644 arena/contentstrategy.json create mode 100644 arena/cssupdater.json create mode 100644 arena/da-agent.json create mode 100644 arena/date-buffer.json create mode 100644 arena/davidtest1.json create mode 100644 arena/davidtestagent.json create mode 100644 arena/dda.json create mode 100644 arena/decision-maker.json create mode 100644 arena/dev_agent.json create mode 100644 arena/devagent.json create mode 100644 arena/dive2code.json create mode 100644 arena/dndagent.json create mode 100644 arena/dy_agent.json create mode 100644 arena/dy_agent2.json create mode 100644 arena/easn.json create mode 100644 arena/eddy.json create mode 100644 arena/ekc911_agent.json create mode 100644 arena/engineer.json create mode 100644 arena/evlyn.json create mode 100644 arena/evo-ninja.json create mode 100644 arena/evo.json create mode 100644 arena/faran.json create mode 100644 arena/first-agent.json create mode 100644 arena/foobar.json create mode 100644 arena/frankgarcia.json create mode 100644 arena/fritzgpt.json create mode 100644 arena/fst.json create mode 100644 arena/fuzz_gen.json create mode 100644 
arena/gaby_agent.json create mode 100644 arena/gen_fuzz.json create mode 100644 arena/ghostcoder.json create mode 100644 arena/gipity.json create mode 100644 arena/gpt-dev-engineer-agent.json create mode 100644 arena/gpt-eng-forge.json create mode 100644 arena/gpt-engineer.json create mode 100644 arena/gpt_for_beans.json create mode 100644 arena/hall_oto.json create mode 100644 arena/han.json create mode 100644 arena/happy_guy.json create mode 100644 arena/hello.json create mode 100644 arena/hodri.json create mode 100644 arena/houxe.json create mode 100644 arena/icode.json create mode 100644 arena/iku2.json create mode 100644 arena/illynet.json create mode 100644 arena/illynetV2.json create mode 100644 arena/illyx1.json create mode 100644 arena/info-retrieval.json create mode 100644 arena/ivangpt_agent.json create mode 100644 arena/jarvis2.json create mode 100644 arena/jarvis3.json create mode 100644 arena/jaxbob1.json create mode 100644 arena/job_apply.json create mode 100644 arena/jonesyboi.json create mode 100644 arena/justwondering.json create mode 100644 arena/kingmitch.json create mode 100644 arena/lawk.json create mode 100644 arena/lcdegpt.json create mode 100644 arena/letst.json create mode 100644 arena/letstest.json create mode 100644 arena/lilAgent.json create mode 100644 arena/linggong.json create mode 100644 arena/liuzh.json create mode 100644 arena/ltzAgent.json create mode 100644 arena/martingpt.json create mode 100644 arena/medical-agent.json create mode 100644 arena/metware.json create mode 100644 arena/miniAgent.json create mode 100644 arena/minister_agent.json create mode 100644 arena/misslu.json create mode 100644 arena/mljar-agent.json create mode 100644 arena/momo.json create mode 100644 arena/monthly_summary.json create mode 100644 arena/mrSabelotodo.json create mode 100644 arena/myGPT.json create mode 100644 arena/my_AutoGPT.json create mode 100644 arena/my_fx_agent.json create mode 100644 arena/my_gpt.json create mode 100644 
arena/mygent.json create mode 100644 arena/nawalj.json create mode 100644 arena/newAgent.json create mode 100644 arena/northfork.json create mode 100644 arena/od_agent_1.json create mode 100644 arena/operationAgent.json create mode 100644 arena/personal-al-website.json create mode 100644 arena/piGPT.json create mode 100644 arena/pipeline.json create mode 100644 arena/podcast_agent.json create mode 100644 arena/potato.json create mode 100644 arena/project_assitant.json create mode 100644 arena/project_master.json create mode 100644 arena/project_review.json create mode 100644 arena/prometheus.json create mode 100644 arena/proudgpt.json create mode 100644 arena/qinghu3.json create mode 100644 arena/ra.json create mode 100644 arena/ra1.json create mode 100644 arena/rachael.json create mode 100644 arena/raindrop.json create mode 100644 arena/researchGPT.json create mode 100644 arena/researchGPT2.json create mode 100644 arena/research_analyst.json create mode 100644 arena/robita.json create mode 100644 arena/robot.json create mode 100644 arena/searchagent.json create mode 100644 arena/set.json create mode 100644 arena/sgpt.json create mode 100644 arena/shivi.json create mode 100644 arena/sky.json create mode 100644 arena/smith.json create mode 100644 arena/songyalei.json create mode 100644 arena/sql.json create mode 100644 arena/stefan.json create mode 100644 arena/stockAgent.json create mode 100644 arena/swarms.json create mode 100644 arena/tdev.json create mode 100644 arena/teacher.json create mode 100644 arena/test-tpk.json create mode 100644 arena/test.json create mode 100644 arena/test1.json create mode 100644 arena/testGPT.json create mode 100644 arena/thebestagent.json create mode 100644 arena/theone.json create mode 100644 arena/tiffGPT.json create mode 100644 arena/trend_agent.json create mode 100644 arena/umiuni_agent.json create mode 100644 arena/uply.json create mode 100644 arena/url-to-lead.json create mode 100644 arena/v-gpt.json create mode 100644 
arena/victor2-0.json create mode 100644 arena/web_developer.json create mode 100644 arena/webagent.json create mode 100644 arena/webgeek.json create mode 100644 arena/wedding-planner.json create mode 100644 arena/woohoo_agent.json create mode 100644 arena/wyjagent.json create mode 100644 arena/xmly.json create mode 100644 arena/xq_agent.json create mode 100644 arena/xt0m-GPT.json create mode 100644 arena/xtest.json create mode 100644 arena/yarbis.json create mode 100644 arena/zaheer.json create mode 100644 arena/zcb.json create mode 100644 arena/zczc.json create mode 100644 arena/zhizhi.json create mode 100644 arena/zlipknot_1.json create mode 100644 arena/zlipknot_test_agent_4.json create mode 100644 arena/zze.json create mode 100644 autogpt_platform/backend/backend/server/routers/integrations.py create mode 100644 autogpt_platform/frontend/src/components/ui/use-toast.ts create mode 100644 autogpts/autogpt/.coveragerc create mode 100644 autogpts/autogpt/.devcontainer/Dockerfile create mode 100644 autogpts/autogpt/.devcontainer/devcontainer.json create mode 100644 autogpts/autogpt/.devcontainer/docker-compose.yml create mode 100644 autogpts/autogpt/.dockerignore create mode 100644 autogpts/autogpt/.env.template create mode 100644 autogpts/autogpt/.envrc create mode 100644 autogpts/autogpt/.flake8 create mode 100644 autogpts/autogpt/.gitattributes create mode 100644 autogpts/autogpt/.gitignore create mode 100644 autogpts/autogpt/.pre-commit-config.yaml create mode 100644 autogpts/autogpt/.sourcery.yaml create mode 100644 autogpts/autogpt/BULLETIN.md create mode 100644 autogpts/autogpt/Dockerfile create mode 100644 autogpts/autogpt/README.md create mode 100644 autogpts/autogpt/agbenchmark_config/.gitignore create mode 100644 autogpts/autogpt/agbenchmark_config/__init__.py create mode 100644 autogpts/autogpt/agbenchmark_config/analyze_reports.py create mode 100644 autogpts/autogpt/agbenchmark_config/benchmarks.py create mode 100644 
autogpts/autogpt/agbenchmark_config/config.json create mode 100644 autogpts/autogpt/autogpt.bat create mode 100644 autogpts/autogpt/autogpt.sh create mode 100644 autogpts/autogpt/autogpt/__init__.py create mode 100644 autogpts/autogpt/autogpt/__main__.py create mode 100644 autogpts/autogpt/autogpt/agent_factory/configurators.py create mode 100644 autogpts/autogpt/autogpt/agent_factory/generators.py create mode 100644 autogpts/autogpt/autogpt/agent_factory/profile_generator.py create mode 100644 autogpts/autogpt/autogpt/agent_manager/__init__.py create mode 100644 autogpts/autogpt/autogpt/agent_manager/agent_manager.py create mode 100644 autogpts/autogpt/autogpt/agents/__init__.py create mode 100644 autogpts/autogpt/autogpt/agents/agent.py create mode 100644 autogpts/autogpt/autogpt/agents/base.py create mode 100644 autogpts/autogpt/autogpt/agents/features/agent_file_manager.py create mode 100644 autogpts/autogpt/autogpt/agents/features/context.py create mode 100644 autogpts/autogpt/autogpt/agents/features/watchdog.py create mode 100644 autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py create mode 100644 autogpts/autogpt/autogpt/agents/utils/exceptions.py create mode 100644 autogpts/autogpt/autogpt/agents/utils/prompt_scratchpad.py create mode 100644 autogpts/autogpt/autogpt/app/__init__.py create mode 100644 autogpts/autogpt/autogpt/app/agent_protocol_server.py create mode 100644 autogpts/autogpt/autogpt/app/cli.py create mode 100644 autogpts/autogpt/autogpt/app/configurator.py create mode 100644 autogpts/autogpt/autogpt/app/main.py create mode 100644 autogpts/autogpt/autogpt/app/setup.py create mode 100644 autogpts/autogpt/autogpt/app/spinner.py create mode 100644 autogpts/autogpt/autogpt/app/telemetry.py create mode 100644 autogpts/autogpt/autogpt/app/utils.py create mode 100644 autogpts/autogpt/autogpt/command_decorator.py create mode 100644 autogpts/autogpt/autogpt/commands/__init__.py create mode 100644 
autogpts/autogpt/autogpt/commands/decorators.py create mode 100644 autogpts/autogpt/autogpt/commands/execute_code.py create mode 100644 autogpts/autogpt/autogpt/commands/file_context.py create mode 100644 autogpts/autogpt/autogpt/commands/file_operations.py create mode 100644 autogpts/autogpt/autogpt/commands/file_operations_utils.py create mode 100644 autogpts/autogpt/autogpt/commands/git_operations.py create mode 100644 autogpts/autogpt/autogpt/commands/image_gen.py create mode 100644 autogpts/autogpt/autogpt/commands/system.py create mode 100644 autogpts/autogpt/autogpt/commands/times.py create mode 100644 autogpts/autogpt/autogpt/commands/user_interaction.py create mode 100644 autogpts/autogpt/autogpt/commands/web_search.py create mode 100644 autogpts/autogpt/autogpt/commands/web_selenium.py create mode 100644 autogpts/autogpt/autogpt/config/__init__.py create mode 100644 autogpts/autogpt/autogpt/config/ai_directives.py create mode 100644 autogpts/autogpt/autogpt/config/ai_profile.py create mode 100644 autogpts/autogpt/autogpt/config/config.py create mode 100644 autogpts/autogpt/autogpt/core/ARCHITECTURE_NOTES.md create mode 100644 autogpts/autogpt/autogpt/core/README.md create mode 100644 autogpts/autogpt/autogpt/core/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/ability/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/ability/base.py create mode 100644 autogpts/autogpt/autogpt/core/ability/builtins/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/ability/builtins/create_new_ability.py create mode 100644 autogpts/autogpt/autogpt/core/ability/builtins/file_operations.py create mode 100644 autogpts/autogpt/autogpt/core/ability/builtins/query_language_model.py create mode 100644 autogpts/autogpt/autogpt/core/ability/schema.py create mode 100644 autogpts/autogpt/autogpt/core/ability/simple.py create mode 100644 autogpts/autogpt/autogpt/core/agent/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/agent/base.py 
create mode 100644 autogpts/autogpt/autogpt/core/agent/simple.py create mode 100644 autogpts/autogpt/autogpt/core/configuration/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/configuration/schema.py create mode 100644 autogpts/autogpt/autogpt/core/memory/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/memory/base.py create mode 100644 autogpts/autogpt/autogpt/core/memory/simple.py create mode 100644 autogpts/autogpt/autogpt/core/planning/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/planning/base.py create mode 100644 autogpts/autogpt/autogpt/core/planning/prompt_strategies/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py create mode 100644 autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py create mode 100644 autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py create mode 100644 autogpts/autogpt/autogpt/core/planning/schema.py create mode 100644 autogpts/autogpt/autogpt/core/planning/simple.py create mode 100644 autogpts/autogpt/autogpt/core/planning/templates.py create mode 100644 autogpts/autogpt/autogpt/core/plugin/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/plugin/base.py create mode 100644 autogpts/autogpt/autogpt/core/plugin/simple.py create mode 100644 autogpts/autogpt/autogpt/core/poetry.lock create mode 100644 autogpts/autogpt/autogpt/core/prompting/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/prompting/base.py create mode 100644 autogpts/autogpt/autogpt/core/prompting/schema.py create mode 100644 autogpts/autogpt/autogpt/core/prompting/utils.py create mode 100644 autogpts/autogpt/autogpt/core/pyproject.toml create mode 100644 autogpts/autogpt/autogpt/core/resource/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/resource/model_providers/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/resource/model_providers/openai.py create mode 100644 
autogpts/autogpt/autogpt/core/resource/model_providers/schema.py create mode 100644 autogpts/autogpt/autogpt/core/resource/schema.py create mode 100644 autogpts/autogpt/autogpt/core/runner/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/runner/cli_app/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/runner/cli_app/cli.py create mode 100644 autogpts/autogpt/autogpt/core/runner/cli_app/main.py create mode 100644 autogpts/autogpt/autogpt/core/runner/cli_web_app/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/runner/cli_web_app/cli.py create mode 100644 autogpts/autogpt/autogpt/core/runner/cli_web_app/server/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/runner/cli_web_app/server/api.py create mode 100644 autogpts/autogpt/autogpt/core/runner/client_lib/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/runner/client_lib/logging/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/runner/client_lib/logging/config.py create mode 100644 autogpts/autogpt/autogpt/core/runner/client_lib/logging/helpers.py create mode 100644 autogpts/autogpt/autogpt/core/runner/client_lib/parser.py create mode 100644 autogpts/autogpt/autogpt/core/runner/client_lib/settings.py create mode 100644 autogpts/autogpt/autogpt/core/runner/client_lib/shared_click_commands.py create mode 100644 autogpts/autogpt/autogpt/core/runner/client_lib/utils.py create mode 100644 autogpts/autogpt/autogpt/core/utils/json_schema.py create mode 100644 autogpts/autogpt/autogpt/core/utils/json_utils.py create mode 100644 autogpts/autogpt/autogpt/core/workspace/__init__.py create mode 100644 autogpts/autogpt/autogpt/core/workspace/base.py create mode 100644 autogpts/autogpt/autogpt/core/workspace/simple.py create mode 100644 autogpts/autogpt/autogpt/file_storage/__init__.py create mode 100644 autogpts/autogpt/autogpt/file_storage/base.py create mode 100644 autogpts/autogpt/autogpt/file_storage/gcs.py create mode 100644 
autogpts/autogpt/autogpt/file_storage/local.py create mode 100644 autogpts/autogpt/autogpt/file_storage/s3.py create mode 100644 autogpts/autogpt/autogpt/llm/providers/__init__.py create mode 100644 autogpts/autogpt/autogpt/llm/providers/openai.py create mode 100644 autogpts/autogpt/autogpt/logs/__init__.py create mode 100644 autogpts/autogpt/autogpt/logs/config.py create mode 100644 autogpts/autogpt/autogpt/logs/filters.py create mode 100644 autogpts/autogpt/autogpt/logs/formatters.py create mode 100644 autogpts/autogpt/autogpt/logs/handlers.py create mode 100644 autogpts/autogpt/autogpt/logs/helpers.py create mode 100644 autogpts/autogpt/autogpt/logs/log_cycle.py create mode 100644 autogpts/autogpt/autogpt/logs/utils.py create mode 100644 autogpts/autogpt/autogpt/memory/vector/__init__.py create mode 100644 autogpts/autogpt/autogpt/memory/vector/memory_item.py create mode 100644 autogpts/autogpt/autogpt/memory/vector/providers/__init__.py create mode 100644 autogpts/autogpt/autogpt/memory/vector/providers/base.py create mode 100644 autogpts/autogpt/autogpt/memory/vector/providers/json_file.py create mode 100644 autogpts/autogpt/autogpt/memory/vector/providers/no_memory.py create mode 100644 autogpts/autogpt/autogpt/memory/vector/utils.py create mode 100644 autogpts/autogpt/autogpt/models/__init__.py create mode 100644 autogpts/autogpt/autogpt/models/action_history.py create mode 100644 autogpts/autogpt/autogpt/models/base_open_ai_plugin.py create mode 100644 autogpts/autogpt/autogpt/models/command.py create mode 100644 autogpts/autogpt/autogpt/models/command_parameter.py create mode 100644 autogpts/autogpt/autogpt/models/command_registry.py create mode 100644 autogpts/autogpt/autogpt/models/context_item.py create mode 100644 autogpts/autogpt/autogpt/plugins/__init__.py create mode 100644 autogpts/autogpt/autogpt/plugins/plugin_config.py create mode 100644 autogpts/autogpt/autogpt/plugins/plugins_config.py create mode 100644 
autogpts/autogpt/autogpt/processing/__init__.py create mode 100644 autogpts/autogpt/autogpt/processing/html.py create mode 100644 autogpts/autogpt/autogpt/processing/text.py create mode 100644 autogpts/autogpt/autogpt/prompts/__init__.py create mode 100644 autogpts/autogpt/autogpt/prompts/prompt.py create mode 100644 autogpts/autogpt/autogpt/prompts/utils.py create mode 100644 autogpts/autogpt/autogpt/singleton.py create mode 100644 autogpts/autogpt/autogpt/speech/__init__.py create mode 100644 autogpts/autogpt/autogpt/speech/base.py create mode 100644 autogpts/autogpt/autogpt/speech/eleven_labs.py create mode 100644 autogpts/autogpt/autogpt/speech/gtts.py create mode 100644 autogpts/autogpt/autogpt/speech/macos_tts.py create mode 100644 autogpts/autogpt/autogpt/speech/say.py create mode 100644 autogpts/autogpt/autogpt/speech/stream_elements_speech.py create mode 100644 autogpts/autogpt/autogpt/url_utils/__init__.py create mode 100644 autogpts/autogpt/autogpt/url_utils/validators.py create mode 100644 autogpts/autogpt/autogpt/utils.py create mode 100644 autogpts/autogpt/azure.yaml.template create mode 100644 autogpts/autogpt/challenges_already_beaten.json create mode 100644 autogpts/autogpt/codecov.yml create mode 100644 autogpts/autogpt/data/.keep create mode 100644 autogpts/autogpt/docker-compose.yml create mode 100644 autogpts/autogpt/hooks/post-checkout create mode 100644 autogpts/autogpt/hooks/post-rewrite create mode 100644 autogpts/autogpt/plugin.png create mode 100644 autogpts/autogpt/plugins/.keep create mode 100644 autogpts/autogpt/poetry.lock create mode 100644 autogpts/autogpt/prompt_settings.yaml create mode 100644 autogpts/autogpt/pyproject.toml create mode 100644 autogpts/autogpt/run create mode 100644 autogpts/autogpt/run_benchmark create mode 100644 autogpts/autogpt/scripts/__init__.py create mode 100644 autogpts/autogpt/scripts/check_requirements.py create mode 100644 autogpts/autogpt/scripts/install_plugin_deps.py create mode 100644 
autogpts/autogpt/setup create mode 100644 autogpts/autogpt/tests/__init__.py create mode 100644 autogpts/autogpt/tests/conftest.py create mode 100644 autogpts/autogpt/tests/context.py create mode 100644 autogpts/autogpt/tests/integration/__init__.py create mode 100644 autogpts/autogpt/tests/integration/agent_factory.py create mode 100644 autogpts/autogpt/tests/integration/memory/__init__.py create mode 100644 autogpts/autogpt/tests/integration/memory/_test_json_file_memory.py create mode 100644 autogpts/autogpt/tests/integration/memory/conftest.py create mode 100644 autogpts/autogpt/tests/integration/memory/utils.py create mode 100644 autogpts/autogpt/tests/integration/test_execute_code.py create mode 100644 autogpts/autogpt/tests/integration/test_image_gen.py create mode 100644 autogpts/autogpt/tests/integration/test_setup.py create mode 100644 autogpts/autogpt/tests/integration/test_web_selenium.py create mode 100644 autogpts/autogpt/tests/mocks/__init__.py create mode 100644 autogpts/autogpt/tests/mocks/mock_commands.py create mode 100644 autogpts/autogpt/tests/unit/__init__.py create mode 100644 autogpts/autogpt/tests/unit/data/test_ai_config.yaml create mode 100644 autogpts/autogpt/tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip create mode 100644 autogpts/autogpt/tests/unit/data/test_plugins/auto_gpt_guanaco/__init__.py create mode 100644 autogpts/autogpt/tests/unit/models/test_base_open_api_plugin.py create mode 100644 autogpts/autogpt/tests/unit/test_ai_profile.py create mode 100644 autogpts/autogpt/tests/unit/test_commands.py create mode 100644 autogpts/autogpt/tests/unit/test_config.py create mode 100644 autogpts/autogpt/tests/unit/test_file_operations.py create mode 100644 autogpts/autogpt/tests/unit/test_gcs_file_storage.py create mode 100644 autogpts/autogpt/tests/unit/test_git_commands.py create mode 100644 autogpts/autogpt/tests/unit/test_json_utils.py create mode 100644 autogpts/autogpt/tests/unit/test_local_file_storage.py create mode 
100644 autogpts/autogpt/tests/unit/test_logs.py create mode 100644 autogpts/autogpt/tests/unit/test_plugins.py create mode 100644 autogpts/autogpt/tests/unit/test_prompt_config.py create mode 100644 autogpts/autogpt/tests/unit/test_s3_file_storage.py create mode 100644 autogpts/autogpt/tests/unit/test_spinner.py create mode 100644 autogpts/autogpt/tests/unit/test_text_file_parsers.py create mode 100644 autogpts/autogpt/tests/unit/test_url_validation.py create mode 100644 autogpts/autogpt/tests/unit/test_utils.py create mode 100644 autogpts/autogpt/tests/unit/test_web_search.py create mode 100644 autogpts/autogpt/tests/utils.py create mode 100644 autogpts/autogpt/tests/vcr/__init__.py create mode 100644 autogpts/autogpt/tests/vcr/vcr_filter.py diff --git a/.flake8 b/.flake8 new file mode 100644 index 000000000000..5b964172464a --- /dev/null +++ b/.flake8 @@ -0,0 +1,12 @@ +[flake8] +max-line-length = 88 +exclude = + .tox, + __pycache__, + *.pyc, + .env + venv*/*, + .venv/*, + reports/*, + dist/*, + data/*, diff --git a/.github/workflows/arena-intake.yml b/.github/workflows/arena-intake.yml new file mode 100644 index 000000000000..a77540471054 --- /dev/null +++ b/.github/workflows/arena-intake.yml @@ -0,0 +1,169 @@ +name: Arena intake + +on: + # We recommend `pull_request_target` so that github secrets are available. 
+ # In `pull_request` we wouldn't be able to change labels of fork PRs + pull_request_target: + types: [ opened, synchronize ] + paths: + - 'arena/**' + +jobs: + check: + permissions: + pull-requests: write + runs-on: ubuntu-latest + steps: + - name: Checkout PR + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Check Arena entry + uses: actions/github-script@v7 + with: + script: | + console.log('⚙️ Setting up...'); + + const fs = require('fs'); + const path = require('path'); + + const pr = context.payload.pull_request; + const isFork = pr.head.repo.fork; + + console.log('🔄️ Fetching PR diff metadata...'); + const prFilesChanged = (await github.rest.pulls.listFiles({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: pr.number, + })).data; + console.debug(prFilesChanged); + const arenaFilesChanged = prFilesChanged.filter( + ({ filename: file }) => file.startsWith('arena/') && file.endsWith('.json') + ); + const hasChangesInAutogptsFolder = prFilesChanged.some( + ({ filename }) => filename.startsWith('autogpts/') + ); + + console.log(`🗒️ ${arenaFilesChanged.length} arena entries affected`); + console.debug(arenaFilesChanged); + if (arenaFilesChanged.length === 0) { + // If no files in `arena/` are changed, this job does not need to run. + return; + } + + let close = false; + let flagForManualCheck = false; + let issues = []; + + if (isFork) { + if (arenaFilesChanged.length > 1) { + // Impacting multiple entries in `arena/` is not allowed + issues.push('This pull request impacts multiple arena entries'); + } + if (hasChangesInAutogptsFolder) { + // PRs that include the custom agent are generally not allowed + issues.push( + 'This pull request includes changes in `autogpts/`.\n' + + 'Please make sure to only submit your arena entry (`arena/*.json`), ' + + 'and not to accidentally include your custom agent itself.' 
+ ); + } + } + + if (arenaFilesChanged.length === 1) { + const newArenaFile = arenaFilesChanged[0] + const newArenaFileName = path.basename(newArenaFile.filename) + console.log(`🗒️ Arena entry in PR: ${newArenaFile}`); + + if (newArenaFile.status != 'added') { + flagForManualCheck = true; + } + + if (pr.mergeable != false) { + const newArenaEntry = JSON.parse(fs.readFileSync(newArenaFile.filename)); + const allArenaFiles = await (await glob.create('arena/*.json')).glob(); + console.debug(newArenaEntry); + + console.log(`➡️ Checking ${newArenaFileName} against existing entries...`); + for (const file of allArenaFiles) { + const existingEntryName = path.basename(file); + + if (existingEntryName === newArenaFileName) { + continue; + } + + console.debug(`Checking against ${existingEntryName}...`); + + const arenaEntry = JSON.parse(fs.readFileSync(file)); + if (arenaEntry.github_repo_url === newArenaEntry.github_repo_url) { + console.log(`⚠️ Duplicate detected: ${existingEntryName}`); + issues.push( + `The \`github_repo_url\` specified in __${newArenaFileName}__ ` + + `already exists in __${existingEntryName}__. 
` + + `This PR will be closed as duplicate.` + ) + close = true; + } + } + } else { + console.log('⚠️ PR has conflicts'); + issues.push( + `__${newArenaFileName}__ conflicts with existing entry with the same name` + ) + close = true; + } + } // end if (arenaFilesChanged.length === 1) + + console.log('🏁 Finished checking against existing entries'); + + if (issues.length == 0) { + console.log('✅ No issues detected'); + if (flagForManualCheck) { + console.log('🤔 Requesting review from maintainers...'); + await github.rest.pulls.requestReviewers({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: pr.number, + reviewers: ['Pwuts'], + // team_reviewers: ['maintainers'], // doesn't work: https://stackoverflow.com/a/64977184/4751645 + }); + } else { + console.log('➡️ Approving PR...'); + await github.rest.pulls.createReview({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: pr.number, + event: 'APPROVE', + }); + } + } else { + console.log(`⚠️ ${issues.length} issues detected`); + + console.log('➡️ Posting comment indicating issues...'); + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: pr.number, + body: `Our automation found one or more issues with this submission:\n` + + issues.map(i => `- ${i.replace('\n', '\n ')}`).join('\n'), + }); + + console.log("➡️ Applying label 'invalid'..."); + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: pr.number, + labels: ['invalid'], + }); + + if (close) { + console.log('➡️ Auto-closing PR...'); + await github.rest.pulls.update({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: pr.number, + state: 'closed', + }); + } + } diff --git a/.github/workflows/autogpt-ci.yml b/.github/workflows/autogpt-ci.yml new file mode 100644 index 000000000000..bc3858e88d79 --- /dev/null +++ b/.github/workflows/autogpt-ci.yml @@ -0,0 +1,296 @@ +name: AutoGPT Python CI + 
+on: + push: + branches: [ master, development, ci-test* ] + paths: + - '.github/workflows/autogpt-ci.yml' + - 'autogpts/autogpt/**' + - '!autogpts/autogpt/tests/vcr_cassettes' + pull_request: + branches: [ master, development, release-* ] + paths: + - '.github/workflows/autogpt-ci.yml' + - 'autogpts/autogpt/**' + - '!autogpts/autogpt/tests/vcr_cassettes' + +concurrency: + group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }} + cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }} + +defaults: + run: + shell: bash + working-directory: autogpts/autogpt + +jobs: + lint: + runs-on: ubuntu-latest + env: + min-python-version: "3.10" + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Python ${{ env.min-python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.min-python-version }} + + - id: get_date + name: Get date + run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT + + - name: Set up Python dependency cache + uses: actions/cache@v4 + with: + path: ~/.cache/pypoetry + key: ${{ runner.os }}-poetry-${{ hashFiles('autogpts/autogpt/pyproject.toml') }}-${{ steps.get_date.outputs.date }} + + - name: Install Python dependencies + run: | + curl -sSL https://install.python-poetry.org | python3 - + poetry install + + - name: Lint with flake8 + run: poetry run flake8 + + - name: Check black formatting + run: poetry run black . --check + if: success() || failure() + + - name: Check isort formatting + run: poetry run isort . 
--check + if: success() || failure() + + # - name: Check mypy formatting + # run: poetry run mypy + # if: success() || failure() + + # - name: Check for unused imports and pass statements + # run: | + # cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests" + # poetry run $cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1) + + test: + permissions: + contents: read + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + python-version: ["3.10"] + platform-os: [ubuntu, macos, macos-arm64, windows] + runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }} + + steps: + # Quite slow on macOS (2~4 minutes to set up Docker) + # - name: Set up Docker (macOS) + # if: runner.os == 'macOS' + # uses: crazy-max/ghaction-setup-docker@v3 + + - name: Start MinIO service (Linux) + if: runner.os == 'Linux' + working-directory: '.' + run: | + docker pull minio/minio:edge-cicd + docker run -d -p 9000:9000 minio/minio:edge-cicd + + - name: Start MinIO service (macOS) + if: runner.os == 'macOS' + working-directory: ${{ runner.temp }} + run: | + brew install minio/stable/minio + mkdir data + minio server ./data & + + # No MinIO on Windows: + # - Windows doesn't support running Linux Docker containers + # - It doesn't seem possible to start background processes on Windows. They are + # killed after the step returns. 
+ # See: https://github.com/actions/runner/issues/598#issuecomment-2011890429 + + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + + - name: Configure git user Auto-GPT-Bot + run: | + git config --global user.name "Auto-GPT-Bot" + git config --global user.email "github-bot@agpt.co" + + - name: Checkout cassettes + if: ${{ startsWith(github.event_name, 'pull_request') }} + env: + PR_BASE: ${{ github.event.pull_request.base.ref }} + PR_BRANCH: ${{ github.event.pull_request.head.ref }} + PR_AUTHOR: ${{ github.event.pull_request.user.login }} + run: | + cassette_branch="${PR_AUTHOR}-${PR_BRANCH}" + cassette_base_branch="${PR_BASE}" + cd tests/vcr_cassettes + + if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then + cassette_base_branch="master" + fi + + if git ls-remote --exit-code --heads origin $cassette_branch ; then + git fetch origin $cassette_branch + git fetch origin $cassette_base_branch + + git checkout $cassette_branch + + # Pick non-conflicting cassette updates from the base branch + git merge --no-commit --strategy-option=ours origin/$cassette_base_branch + echo "Using cassettes from mirror branch '$cassette_branch'," \ + "synced to upstream branch '$cassette_base_branch'." + else + git checkout -b $cassette_branch + echo "Branch '$cassette_branch' does not exist in cassette submodule." \ + "Using cassettes from '$cassette_base_branch'." 
+ fi + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - id: get_date + name: Get date + run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT + + - name: Set up Python dependency cache + # On Windows, unpacking cached dependencies takes longer than just installing them + if: runner.os != 'Windows' + uses: actions/cache@v4 + with: + path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }} + key: poetry-${{ runner.os }}-${{ hashFiles('autogpts/autogpt/poetry.lock') }} + + - name: Install Poetry (Unix) + if: runner.os != 'Windows' + run: | + curl -sSL https://install.python-poetry.org | python3 - + + if [ "${{ runner.os }}" = "macOS" ]; then + PATH="$HOME/.local/bin:$PATH" + echo "$HOME/.local/bin" >> $GITHUB_PATH + fi + + - name: Install Poetry (Windows) + if: runner.os == 'Windows' + shell: pwsh + run: | + (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python - + + $env:PATH += ";$env:APPDATA\Python\Scripts" + echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH + + - name: Install Python dependencies + run: poetry install + + - name: Run pytest with coverage + run: | + poetry run pytest -vv \ + --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \ + --numprocesses=logical --durations=10 \ + tests/unit tests/integration + env: + CI: true + PLAIN_OUTPUT: True + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }} + AWS_ACCESS_KEY_ID: minioadmin + AWS_SECRET_ACCESS_KEY: minioadmin + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} + flags: autogpt-agent,${{ runner.os }} + + - id: setup_git_auth + name: Set up git token authentication + # Cassettes may be pushed even when tests fail + if: success() || failure() + run: | + 
config_key="http.${{ github.server_url }}/.extraheader" + if [ "${{ runner.os }}" = 'macOS' ]; then + base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64) + else + base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0) + fi + + git config "$config_key" \ + "Authorization: Basic $base64_pat" + + cd tests/vcr_cassettes + git config "$config_key" \ + "Authorization: Basic $base64_pat" + + echo "config_key=$config_key" >> $GITHUB_OUTPUT + + - id: push_cassettes + name: Push updated cassettes + # For pull requests, push updated cassettes even when tests fail + if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure())) + env: + PR_BRANCH: ${{ github.event.pull_request.head.ref }} + PR_AUTHOR: ${{ github.event.pull_request.user.login }} + run: | + if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then + is_pull_request=true + cassette_branch="${PR_AUTHOR}-${PR_BRANCH}" + else + cassette_branch="${{ github.ref_name }}" + fi + + cd tests/vcr_cassettes + # Commit & push changes to cassettes if any + if ! git diff --quiet; then + git add . + git commit -m "Auto-update cassettes" + git push origin HEAD:$cassette_branch + if [ ! $is_pull_request ]; then + cd ../.. 
+ git add tests/vcr_cassettes + git commit -m "Update cassette submodule" + git push origin HEAD:$cassette_branch + fi + echo "updated=true" >> $GITHUB_OUTPUT + else + echo "updated=false" >> $GITHUB_OUTPUT + echo "No cassette changes to commit" + fi + + - name: Post Set up git token auth + if: steps.setup_git_auth.outcome == 'success' + run: | + git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}' + git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}' + + - name: Apply "behaviour change" label and comment on PR + if: ${{ startsWith(github.event_name, 'pull_request') }} + run: | + PR_NUMBER="${{ github.event.pull_request.number }}" + TOKEN="${{ secrets.PAT_REVIEW }}" + REPO="${{ github.repository }}" + + if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then + echo "Adding label and comment..." + echo $TOKEN | gh auth login --with-token + gh issue edit $PR_NUMBER --add-label "behaviour change" + gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged." 
+ fi + + - name: Upload logs to artifact + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-logs + path: autogpts/autogpt/logs/ diff --git a/.github/workflows/autogpt-docker-cache-clean.yml b/.github/workflows/autogpt-docker-cache-clean.yml new file mode 100644 index 000000000000..22c940128d35 --- /dev/null +++ b/.github/workflows/autogpt-docker-cache-clean.yml @@ -0,0 +1,59 @@ +name: Purge Auto-GPT Docker CI cache + +on: + schedule: + - cron: 20 4 * * 1,4 + +env: + BASE_BRANCH: development + IMAGE_NAME: auto-gpt + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + build-type: [release, dev] + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - id: build + name: Build image + uses: docker/build-push-action@v5 + with: + context: autogpts/autogpt + build-args: BUILD_TYPE=${{ matrix.build-type }} + load: true # save to docker images + # use GHA cache as read-only + cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max + + - name: Generate build report + env: + event_name: ${{ github.event_name }} + event_ref: ${{ github.event.schedule }} + + build_type: ${{ matrix.build-type }} + + prod_branch: master + dev_branch: development + repository: ${{ github.repository }} + base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }} + + current_ref: ${{ github.ref_name }} + commit_hash: ${{ github.sha }} + source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }} + push_forced_label: + + new_commits_json: ${{ null }} + compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }} + + github_context_json: ${{ toJSON(github) }} + job_env_json: ${{ toJSON(env) }} + vars_json: ${{ toJSON(vars) }} + + run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY + continue-on-error: true diff --git 
a/.github/workflows/autogpt-docker-ci.yml b/.github/workflows/autogpt-docker-ci.yml new file mode 100644 index 000000000000..4ef63547e795 --- /dev/null +++ b/.github/workflows/autogpt-docker-ci.yml @@ -0,0 +1,165 @@ +name: AutoGPT Docker CI + +on: + push: + branches: [ master, development ] + paths: + - '.github/workflows/autogpt-docker-ci.yml' + - 'autogpts/autogpt/**' + - '!autogpts/autogpt/tests/vcr_cassettes' + pull_request: + branches: [ master, development, release-* ] + paths: + - '.github/workflows/autogpt-docker-ci.yml' + - 'autogpts/autogpt/**' + - '!autogpts/autogpt/tests/vcr_cassettes' + +concurrency: + group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +defaults: + run: + working-directory: autogpts/autogpt + +env: + IMAGE_NAME: auto-gpt + DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER && format('{0}/', secrets.DOCKER_USER) || '' }}auto-gpt + DEV_IMAGE_TAG: latest-dev + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + build-type: [release, dev] + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - if: runner.debug + run: | + ls -al + du -hs * + + - id: build + name: Build image + uses: docker/build-push-action@v5 + with: + context: autogpts/autogpt + build-args: BUILD_TYPE=${{ matrix.build-type }} + tags: ${{ env.IMAGE_NAME }} + labels: GIT_REVISION=${{ github.sha }} + load: true # save to docker images + # cache layers in GitHub Actions cache to speed up builds + cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }} + cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max + + - name: Generate build report + env: + event_name: ${{ github.event_name }} + event_ref: ${{ github.event.ref }} + event_ref_type: ${{ github.event.ref}} + + build_type: ${{ matrix.build-type }} + + 
prod_branch: master + dev_branch: development + repository: ${{ github.repository }} + base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }} + + current_ref: ${{ github.ref_name }} + commit_hash: ${{ github.event.after }} + source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }} + push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }} + + new_commits_json: ${{ toJSON(github.event.commits) }} + compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }} + + github_context_json: ${{ toJSON(github) }} + job_env_json: ${{ toJSON(env) }} + vars_json: ${{ toJSON(vars) }} + + run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY + working-directory: ./ + continue-on-error: true + + test: + runs-on: ubuntu-latest + timeout-minutes: 10 + + services: + minio: + image: minio/minio:edge-cicd + options: > + --name=minio + --health-interval=10s --health-timeout=5s --health-retries=3 + --health-cmd="curl -f http://localhost:9000/minio/health/live" + + steps: + - name: Check out repository + uses: actions/checkout@v4 + with: + submodules: true + + - if: github.event_name == 'push' + name: Log in to Docker hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - id: build + name: Build image + uses: docker/build-push-action@v5 + with: + context: autogpts/autogpt + build-args: BUILD_TYPE=dev # include pytest + tags: > + ${{ env.IMAGE_NAME }}, + ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }} + labels: GIT_REVISION=${{ github.sha }} + load: true # save to docker images + # cache layers in GitHub Actions cache to speed up builds + cache-from: type=gha,scope=autogpt-docker-dev + cache-to: type=gha,scope=autogpt-docker-dev,mode=max + 
+ - id: test + name: Run tests + env: + CI: true + PLAIN_OUTPUT: True + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + S3_ENDPOINT_URL: http://minio:9000 + AWS_ACCESS_KEY_ID: minioadmin + AWS_SECRET_ACCESS_KEY: minioadmin + run: | + set +e + docker run --env CI --env OPENAI_API_KEY \ + --network container:minio \ + --env S3_ENDPOINT_URL --env AWS_ACCESS_KEY_ID --env AWS_SECRET_ACCESS_KEY \ + --entrypoint poetry ${{ env.IMAGE_NAME }} run \ + pytest -v --cov=autogpt --cov-branch --cov-report term-missing \ + --numprocesses=4 --durations=10 \ + tests/unit tests/integration 2>&1 | tee test_output.txt + + test_failure=${PIPESTATUS[0]} + + cat << $EOF >> $GITHUB_STEP_SUMMARY + # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌') + \`\`\` + $(cat test_output.txt) + \`\`\` + $EOF + + exit $test_failure + + - if: github.event_name == 'push' && github.ref_name == 'master' + name: Push image to Docker Hub + run: docker push ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }} diff --git a/.github/workflows/autogpt-docker-release.yml b/.github/workflows/autogpt-docker-release.yml new file mode 100644 index 000000000000..f45a63a2af1e --- /dev/null +++ b/.github/workflows/autogpt-docker-release.yml @@ -0,0 +1,91 @@ +name: AutoGPT Docker Release + +on: + release: + types: [ published, edited ] + + workflow_dispatch: + inputs: + no_cache: + type: boolean + description: 'Build from scratch, without using cached layers' + +defaults: + run: + working-directory: autogpts/autogpt + +env: + IMAGE_NAME: auto-gpt + DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt + +jobs: + build: + if: startsWith(github.ref, 'refs/tags/autogpt-') + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Log in to Docker hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # slashes are not allowed in 
image tags, but can appear in git branch or tag names + - id: sanitize_tag + name: Sanitize image tag + run: | + tag=${raw_tag//\//-} + echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT + env: + raw_tag: ${{ github.ref_name }} + + - id: build + name: Build image + uses: docker/build-push-action@v5 + with: + context: autogpts/autogpt + build-args: BUILD_TYPE=release + load: true # save to docker images + # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555 + tags: > + ${{ env.IMAGE_NAME }}, + ${{ env.DEPLOY_IMAGE_NAME }}:latest, + ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }} + labels: GIT_REVISION=${{ github.sha }} + + # cache layers in GitHub Actions cache to speed up builds + cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release + cache-to: type=gha,scope=autogpt-docker-release,mode=max + + - name: Push image to Docker Hub + run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }} + + - name: Generate build report + env: + event_name: ${{ github.event_name }} + event_ref: ${{ github.event.ref }} + event_ref_type: ${{ github.event.ref}} + inputs_no_cache: ${{ inputs.no_cache }} + + prod_branch: master + dev_branch: development + repository: ${{ github.repository }} + base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }} + + ref_type: ${{ github.ref_type }} + current_ref: ${{ github.ref_name }} + commit_hash: ${{ github.sha }} + source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }} + + github_context_json: ${{ toJSON(github) }} + job_env_json: ${{ toJSON(env) }} + vars_json: ${{ toJSON(vars) }} + + run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY + working-directory: ./ + continue-on-error: true diff --git a/.github/workflows/autogpts-benchmark.yml b/.github/workflows/autogpts-benchmark.yml new file 
mode 100644 index 000000000000..fb1cb6f08eae --- /dev/null +++ b/.github/workflows/autogpts-benchmark.yml @@ -0,0 +1,97 @@ +name: AutoGPTs Nightly Benchmark + +on: + workflow_dispatch: + schedule: + - cron: '0 2 * * *' + +jobs: + benchmark: + permissions: + contents: write + runs-on: ubuntu-latest + strategy: + matrix: + agent-name: [ autogpt ] + fail-fast: false + timeout-minutes: 120 + env: + min-python-version: '3.10' + REPORTS_BRANCH: data/benchmark-reports + REPORTS_FOLDER: ${{ format('benchmark/reports/{0}', matrix.agent-name) }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + + - name: Set up Python ${{ env.min-python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.min-python-version }} + + - name: Install Poetry + run: curl -sSL https://install.python-poetry.org | python - + + - name: Prepare reports folder + run: mkdir -p ${{ env.REPORTS_FOLDER }} + + - run: poetry -C benchmark install + + - name: Benchmark ${{ matrix.agent-name }} + run: | + ./run agent start ${{ matrix.agent-name }} + cd autogpts/${{ matrix.agent-name }} + + set +e # Do not quit on non-zero exit codes + poetry run agbenchmark run -N 3 \ + --test=ReadFile \ + --test=BasicRetrieval --test=RevenueRetrieval2 \ + --test=CombineCsv --test=LabelCsv --test=AnswerQuestionCombineCsv \ + --test=UrlShortener --test=TicTacToe --test=Battleship \ + --test=WebArenaTask_0 --test=WebArenaTask_21 --test=WebArenaTask_124 \ + --test=WebArenaTask_134 --test=WebArenaTask_163 + + # Convert exit code 1 (some challenges failed) to exit code 0 + if [ $? -eq 0 ] || [ $? -eq 1 ]; then + exit 0 + else + exit $? 
+ fi + env: + AGENT_NAME: ${{ matrix.agent-name }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt + REPORTS_FOLDER: ${{ format('../../{0}', env.REPORTS_FOLDER) }} # account for changed workdir + + TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci + TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }} + + - name: Push reports to data branch + run: | + # BODGE: Remove success_rate.json and regression_tests.json to avoid conflicts on checkout + rm ${{ env.REPORTS_FOLDER }}/*.json + + # Find folder with newest (untracked) report in it + report_subfolder=$(find ${{ env.REPORTS_FOLDER }} -type f -name 'report.json' \ + | xargs -I {} dirname {} \ + | xargs -I {} git ls-files --others --exclude-standard {} \ + | xargs -I {} dirname {} \ + | sort -u) + json_report_file="$report_subfolder/report.json" + + # Convert JSON report to Markdown + markdown_report_file="$report_subfolder/report.md" + poetry -C benchmark run benchmark/reports/format.py "$json_report_file" > "$markdown_report_file" + cat "$markdown_report_file" >> $GITHUB_STEP_SUMMARY + + git config --global user.name 'GitHub Actions' + git config --global user.email 'github-actions@agpt.co' + git fetch origin ${{ env.REPORTS_BRANCH }}:${{ env.REPORTS_BRANCH }} \ + && git checkout ${{ env.REPORTS_BRANCH }} \ + || git checkout --orphan ${{ env.REPORTS_BRANCH }} + git reset --hard + git add ${{ env.REPORTS_FOLDER }} + git commit -m "Benchmark report for ${{ matrix.agent-name }} @ $(date +'%Y-%m-%d')" \ + && git push origin ${{ env.REPORTS_BRANCH }} diff --git a/.github/workflows/autogpts-ci.yml b/.github/workflows/autogpts-ci.yml new file mode 100644 index 000000000000..19f8c5ab2816 --- /dev/null +++ b/.github/workflows/autogpts-ci.yml @@ -0,0 +1,69 @@ +name: AutoGPTs smoke test CI + +on: + workflow_dispatch: + schedule: + - cron: '0 8 * * *' + push: + branches: [ master, development, ci-test* ] + paths: + - '.github/workflows/autogpts-ci.yml' + - 'autogpts/**' + - 
'benchmark/**' + - 'run' + - 'cli.py' + - 'setup.py' + - '!**/*.md' + pull_request: + branches: [ master, development, release-* ] + paths: + - '.github/workflows/autogpts-ci.yml' + - 'autogpts/**' + - 'benchmark/**' + - 'run' + - 'cli.py' + - 'setup.py' + - '!**/*.md' + +jobs: + run-tests: + runs-on: ubuntu-latest + strategy: + matrix: + agent-name: [ autogpt, forge ] + fail-fast: false + timeout-minutes: 20 + env: + min-python-version: '3.10' + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + + - name: Set up Python ${{ env.min-python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.min-python-version }} + + - name: Install Poetry + working-directory: ./autogpts/${{ matrix.agent-name }}/ + run: | + curl -sSL https://install.python-poetry.org | python - + + - name: Run regression tests + run: | + ./run agent start ${{ matrix.agent-name }} + cd autogpts/${{ matrix.agent-name }} + poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0 + poetry run agbenchmark --test=WriteFile + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + AGENT_NAME: ${{ matrix.agent-name }} + REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt + HELICONE_CACHE_ENABLED: false + HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }} + REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }} + TELEMETRY_ENVIRONMENT: autogpt-ci + TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }} diff --git a/.github/workflows/benchmark-ci.yml b/.github/workflows/benchmark-ci.yml new file mode 100644 index 000000000000..88c5750ac13f --- /dev/null +++ b/.github/workflows/benchmark-ci.yml @@ -0,0 +1,141 @@ +name: Benchmark CI + +on: + push: + branches: [ master, development, ci-test* ] + paths: + - 'benchmark/**' + - .github/workflows/benchmark-ci.yml + - '!benchmark/reports/**' + pull_request: + branches: [ master, development, release-* ] + paths: + - 'benchmark/**' + - 
'!benchmark/reports/**' + - .github/workflows/benchmark-ci.yml + +env: + min-python-version: '3.10' + +jobs: + lint: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Python ${{ env.min-python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.min-python-version }} + + - id: get_date + name: Get date + working-directory: ./benchmark/ + run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT + + - name: Install Poetry + working-directory: ./benchmark/ + run: | + curl -sSL https://install.python-poetry.org | python - + + - name: Install dependencies + working-directory: ./benchmark/ + run: | + export POETRY_VIRTUALENVS_IN_PROJECT=true + poetry install -vvv + + - name: Lint with flake8 + working-directory: ./benchmark/ + run: poetry run flake8 + + - name: Check black formatting + working-directory: ./benchmark/ + run: poetry run black . --exclude test.py --check + if: success() || failure() + + - name: Check isort formatting + working-directory: ./benchmark/ + run: poetry run isort . 
--check + if: success() || failure() + + - name: Check for unused imports and pass statements + working-directory: ./benchmark/ + run: | + cmd="poetry run autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring agbenchmark" + $cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1) + if: success() || failure() + + tests-agbenchmark: + runs-on: ubuntu-latest + strategy: + matrix: + agent-name: [ forge ] + fail-fast: false + timeout-minutes: 20 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + + - name: Set up Python ${{ env.min-python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.min-python-version }} + + - name: Install Poetry + working-directory: ./autogpts/${{ matrix.agent-name }}/ + run: | + curl -sSL https://install.python-poetry.org | python - + + - name: Run regression tests + run: | + ./run agent start ${{ matrix.agent-name }} + cd autogpts/${{ matrix.agent-name }} + + set +e # Ignore non-zero exit codes and continue execution + echo "Running the following command: poetry run agbenchmark --maintain --mock" + poetry run agbenchmark --maintain --mock + EXIT_CODE=$? + set -e # Stop ignoring non-zero exit codes + # Check if the exit code was 5, and if so, exit with 0 instead + if [ $EXIT_CODE -eq 5 ]; then + echo "regression_tests.json is empty." 
+ fi + + echo "Running the following command: poetry run agbenchmark --mock" + poetry run agbenchmark --mock + + echo "Running the following command: poetry run agbenchmark --mock --category=data" + poetry run agbenchmark --mock --category=data + + echo "Running the following command: poetry run agbenchmark --mock --category=coding" + poetry run agbenchmark --mock --category=coding + + echo "Running the following command: poetry run agbenchmark --test=WriteFile" + poetry run agbenchmark --test=WriteFile + cd ../../benchmark + poetry install + echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed" + export BUILD_SKILL_TREE=true + + poetry run agbenchmark --mock + poetry run pytest -vv -s tests + + CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs" + if [ ! -z "$CHANGED" ]; then + echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed." + echo "$CHANGED" + exit 1 + else + echo "No unstaged changes." 
+ fi + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci + TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }} diff --git a/.github/workflows/benchmark_publish_package.yml b/.github/workflows/benchmark_publish_package.yml new file mode 100644 index 000000000000..3332e6501ef5 --- /dev/null +++ b/.github/workflows/benchmark_publish_package.yml @@ -0,0 +1,55 @@ +name: Publish to PyPI + +on: + workflow_dispatch: + +jobs: + deploy: + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + submodules: true + fetch-depth: 0 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 3.8 + + - name: Install Poetry + working-directory: ./benchmark/ + run: | + curl -sSL https://install.python-poetry.org | python3 - + echo "$HOME/.poetry/bin" >> $GITHUB_PATH + + - name: Build project for distribution + working-directory: ./benchmark/ + run: poetry build + + - name: Install dependencies + working-directory: ./benchmark/ + run: poetry install + + - name: Check Version + working-directory: ./benchmark/ + id: check-version + run: | + echo version=$(poetry version --short) >> $GITHUB_OUTPUT + + - name: Create Release + uses: ncipollo/release-action@v1 + with: + artifacts: "benchmark/dist/*" + token: ${{ secrets.GITHUB_TOKEN }} + draft: false + generateReleaseNotes: false + tag: agbenchmark-v${{ steps.check-version.outputs.version }} + commit: master + + - name: Build and publish + working-directory: ./benchmark/ + run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }} diff --git a/.github/workflows/close-stale-issues.yml b/.github/workflows/close-stale-issues.yml new file mode 100644 index 000000000000..22276417f2de --- /dev/null +++ b/.github/workflows/close-stale-issues.yml @@ -0,0 +1,34 @@ +name: 'Close stale issues' +on: + schedule: + - cron: '30 1 * * *' + workflow_dispatch: + +permissions: + issues: write + +jobs: + 
stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v9 + with: + # operations-per-run: 5000 + stale-issue-message: > + This issue has automatically been marked as _stale_ because it has not had + any activity in the last 50 days. You can _unstale_ it by commenting or + removing the label. Otherwise, this issue will be closed in 10 days. + stale-pr-message: > + This pull request has automatically been marked as _stale_ because it has + not had any activity in the last 50 days. You can _unstale_ it by commenting + or removing the label. + close-issue-message: > + This issue was closed automatically because it has been stale for 10 days + with no activity. + days-before-stale: 50 + days-before-close: 10 + # Do not touch meta issues: + exempt-issue-labels: meta,fridge,project management + # Do not affect pull requests: + days-before-pr-stale: -1 + days-before-pr-close: -1 diff --git a/.github/workflows/frontend-ci.yml b/.github/workflows/frontend-ci.yml new file mode 100644 index 000000000000..4f4d71871a13 --- /dev/null +++ b/.github/workflows/frontend-ci.yml @@ -0,0 +1,60 @@ +name: Frontend CI/CD + +on: + push: + branches: + - master + - development + - 'ci-test*' # This will match any branch that starts with "ci-test" + paths: + - 'frontend/**' + - '.github/workflows/frontend-ci.yml' + pull_request: + paths: + - 'frontend/**' + - '.github/workflows/frontend-ci.yml' + +jobs: + build: + permissions: + contents: write + pull-requests: write + runs-on: ubuntu-latest + env: + BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }} + + steps: + - name: Checkout Repo + uses: actions/checkout@v4 + + - name: Setup Flutter + uses: subosito/flutter-action@v2 + with: + flutter-version: '3.13.2' + + - name: Build Flutter to Web + run: | + cd frontend + flutter build web --base-href /app/ + + # - name: Commit and Push to ${{ env.BUILD_BRANCH }} + # if: github.event_name == 'push' + # run: | + # git config --local user.email "action@github.com" + # git 
config --local user.name "GitHub Action" + # git add frontend/build/web + # git checkout -B ${{ env.BUILD_BRANCH }} + # git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a + # git push -f origin ${{ env.BUILD_BRANCH }} + + - name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }} + if: github.event_name == 'push' + uses: peter-evans/create-pull-request@v6 + with: + add-paths: frontend/build/web + base: ${{ github.ref_name }} + branch: ${{ env.BUILD_BRANCH }} + delete-branch: true + title: "Update frontend build in `${{ github.ref_name }}`" + body: "This PR updates the frontend build based on commit ${{ github.sha }}." + commit-message: "Update frontend build based on commit ${{ github.sha }}" diff --git a/.github/workflows/hackathon.yml b/.github/workflows/hackathon.yml new file mode 100644 index 000000000000..94b2c752f33f --- /dev/null +++ b/.github/workflows/hackathon.yml @@ -0,0 +1,133 @@ +name: Hackathon + +on: + workflow_dispatch: + inputs: + agents: + description: "Agents to run (comma-separated)" + required: false + default: "autogpt" # Default agents if none are specified + +jobs: + matrix-setup: + runs-on: ubuntu-latest + # Service containers to run with `matrix-setup` + services: + # Label used to access the service container + postgres: + # Docker Hub image + image: postgres + # Provide the password for postgres + env: + POSTGRES_PASSWORD: postgres + # Set health checks to wait until postgres has started + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + # Maps tcp port 5432 on service container to the host + - 5432:5432 + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + env-name: ${{ steps.set-matrix.outputs.env-name }} + steps: + - id: set-matrix + run: | + if [ "${{ github.event_name }}" == "schedule" ]; then + echo "::set-output name=env-name::production" + echo "::set-output name=matrix::[ 'irrelevant']" + elif [ "${{ github.event_name }}" == 
"workflow_dispatch" ]; then + IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}" + matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]" + echo "::set-output name=env-name::production" + echo "::set-output name=matrix::$matrix_string" + else + echo "::set-output name=env-name::testing" + echo "::set-output name=matrix::[ 'irrelevant' ]" + fi + + tests: + environment: + name: "${{ needs.matrix-setup.outputs.env-name }}" + needs: matrix-setup + env: + min-python-version: "3.10" + name: "${{ matrix.agent-name }}" + runs-on: ubuntu-latest + services: + # Label used to access the service container + postgres: + # Docker Hub image + image: postgres + # Provide the password for postgres + env: + POSTGRES_PASSWORD: postgres + # Set health checks to wait until postgres has started + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + # Maps tcp port 5432 on service container to the host + - 5432:5432 + timeout-minutes: 50 + strategy: + fail-fast: false + matrix: + agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}} + steps: + - name: Print Environment Name + run: | + echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}" + + - name: Check Docker Container + id: check + run: docker ps + + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + + - name: Set up Python ${{ env.min-python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.min-python-version }} + + - id: get_date + name: Get date + run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT + + - name: Install Poetry + run: | + curl -sSL https://install.python-poetry.org | python - + + - name: Install Node.js + uses: actions/setup-node@v4 + with: + node-version: v18.15 + + - name: Run benchmark + run: | + link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json) + branch=$(jq -r '.["branch_to_benchmark"]' 
arena/$AGENT_NAME.json) + git clone "$link" -b "$branch" "$AGENT_NAME" + cd $AGENT_NAME + cp ./autogpts/$AGENT_NAME/.env.example ./autogpts/$AGENT_NAME/.env || echo "file not found" + ./run agent start $AGENT_NAME + cd ../benchmark + poetry install + poetry run agbenchmark --no-dep + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + SERP_API_KEY: ${{ secrets.SERP_API_KEY }} + SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }} + WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }} + WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }} + GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }} + GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }} + AGENT_NAME: ${{ matrix.agent-name }} diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml new file mode 100644 index 000000000000..415637702f51 --- /dev/null +++ b/.github/workflows/pr-label.yml @@ -0,0 +1,66 @@ +name: "Pull Request auto-label" + +on: + # So that PRs touching the same files as the push are updated + push: + branches: [ master, development, release-* ] + paths-ignore: + - 'autogpts/autogpt/tests/vcr_cassettes' + - 'benchmark/reports/**' + # So that the `dirtyLabel` is removed if conflicts are resolve + # We recommend `pull_request_target` so that github secrets are available. + # In `pull_request` we wouldn't be able to change labels of fork PRs + pull_request_target: + types: [ opened, synchronize ] + +concurrency: + group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }} + cancel-in-progress: true + +jobs: + conflicts: + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + steps: + - name: Update PRs with conflict labels + uses: eps1lon/actions-label-merge-conflict@releases/2.x + with: + dirtyLabel: "conflicts" + #removeOnDirtyLabel: "PR: ready to ship" + repoToken: "${{ secrets.GITHUB_TOKEN }}" + commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request." 
+ commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly." + + size: + if: ${{ github.event_name == 'pull_request_target' }} + permissions: + issues: write + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: codelytv/pr-size-labeler@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + xs_label: 'size/xs' + xs_max_size: 2 + s_label: 'size/s' + s_max_size: 10 + m_label: 'size/m' + m_max_size: 100 + l_label: 'size/l' + l_max_size: 500 + xl_label: 'size/xl' + message_if_xl: + + scope: + if: ${{ github.event_name == 'pull_request_target' }} + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v5 + with: + sync-labels: true diff --git a/CLI-USAGE.md b/CLI-USAGE.md new file mode 100644 index 000000000000..82c3ecc8d3a0 --- /dev/null +++ b/CLI-USAGE.md @@ -0,0 +1,182 @@ +## CLI Documentation + +This document describes how to interact with the project's CLI (Command Line Interface). It includes the types of outputs you can expect from each command. Note that the `agents stop` command will terminate any process running on port 8000. + +### 1. Entry Point for the CLI + +Running the `./run` command without any parameters will display the help message, which provides a list of available commands and options. Additionally, you can append `--help` to any command to view help information specific to that command. + +```sh +./run +``` + +**Output**: + +``` +Usage: cli.py [OPTIONS] COMMAND [ARGS]... + +Options: + --help Show this message and exit. + +Commands: + agent Commands to create, start and stop agents + benchmark Commands to start the benchmark and list tests and categories + setup Installs dependencies needed for your system. 
+``` + +If you need assistance with any command, simply add the `--help` parameter to the end of your command, like so: + +```sh +./run COMMAND --help +``` + +This will display a detailed help message regarding that specific command, including a list of any additional options and arguments it accepts. + +### 2. Setup Command + +```sh +./run setup +``` + +**Output**: + +``` +Setup initiated +Installation has been completed. +``` + +This command initializes the setup of the project. + +### 3. Agents Commands + +**a. List All Agents** + +```sh +./run agent list +``` + +**Output**: + +``` +Available agents: 🤖 + 🐙 forge + 🐙 autogpt +``` + +Lists all the available agents. + +**b. Create a New Agent** + +```sh +./run agent create my_agent +``` + +**Output**: + +``` +🎉 New agent 'my_agent' created and switched to the new directory in autogpts folder. +``` + +Creates a new agent named 'my_agent'. + +**c. Start an Agent** + +```sh +./run agent start my_agent +``` + +**Output**: + +``` +... (ASCII Art representing the agent startup) +[Date and Time] [forge.sdk.db] [DEBUG] 🐛 Initializing AgentDB with database_string: sqlite:///agent.db +[Date and Time] [forge.sdk.agent] [INFO] 📝 Agent server starting on http://0.0.0.0:8000 +``` + +Starts the 'my_agent' and displays startup ASCII art and logs. + +**d. Stop an Agent** + +```sh +./run agent stop +``` + +**Output**: + +``` +Agent stopped +``` + +Stops the running agent. + +### 4. Benchmark Commands + +**a. List Benchmark Categories** + +```sh +./run benchmark categories list +``` + +**Output**: + +``` +Available categories: 📚 + 📖 code + 📖 safety + 📖 memory + ... (and so on) +``` + +Lists all available benchmark categories. + +**b. List Benchmark Tests** + +```sh +./run benchmark tests list +``` + +**Output**: + +``` +Available tests: 📚 + 📖 interface + 🔬 Search - TestSearch + 🔬 Write File - TestWriteFile + ... (and so on) +``` + +Lists all available benchmark tests. + +**c. 
Show Details of a Benchmark Test** + +```sh +./run benchmark tests details TestWriteFile +``` + +**Output**: + +``` +TestWriteFile +------------- + + Category: interface + Task: Write the word 'Washington' to a .txt file + ... (and other details) +``` + +Displays the details of the 'TestWriteFile' benchmark test. + +**d. Start Benchmark for the Agent** + +```sh +./run benchmark start my_agent +``` + +**Output**: + +``` +(more details about the testing process shown whilst the test are running) +============= 13 failed, 1 passed in 0.97s ============... +``` + +Displays the results of the benchmark tests on 'my_agent'. diff --git a/QUICKSTART.md b/QUICKSTART.md new file mode 100644 index 000000000000..885533883236 --- /dev/null +++ b/QUICKSTART.md @@ -0,0 +1,200 @@ +# Quickstart Guide + +> For the complete getting started [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec) <- click here + +Welcome to the Quickstart Guide! This guide will walk you through the process of setting up and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the necessary steps to jumpstart your journey in the world of AI development with AutoGPT. + +## System Requirements + +This project supports Linux (Debian based), Mac, and Windows Subsystem for Linux (WSL). If you are using a Windows system, you will need to install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/). + + +## Getting Setup +1. **Fork the Repository** + To fork the repository, follow these steps: + - Navigate to the main page of the repository. + + ![Repository](docs/content/imgs/quickstart/001_repo.png) + - In the top-right corner of the page, click Fork. + + ![Create Fork UI](docs/content/imgs/quickstart/002_fork.png) + - On the next page, select your GitHub account to create the fork under. + - Wait for the forking process to complete. 
You now have a copy of the repository in your GitHub account. + +2. **Clone the Repository** + To clone the repository, you need to have Git installed on your system. If you don't have Git installed, you can download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps: + - Open your terminal. + - Navigate to the directory where you want to clone the repository. + - Run the git clone command for the fork you just created + + ![Clone the Repository](docs/content/imgs/quickstart/003_clone.png) + + - Then open your project in your ide + + ![Open the Project in your IDE](docs/content/imgs/quickstart/004_ide.png) + +4. **Setup the Project** + Next we need to setup the required dependencies. We have a tool for helping you do all the tasks you need to on the repo. + It can be accessed by running the `run` command by typing `./run` in the terminal. + + The first command you need to use is `./run setup` This will guide you through the process of setting up your system. + Initially you will get instructions for installing flutter, chrome and setting up your github access token like the following image: + + > Note: for advanced users. The github access token is only needed for the ./run arena enter command so the system can automatically create a PR + + + ![Setup the Project](docs/content/imgs/quickstart/005_setup.png) + +### For Windows Users + +If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them. + +#### Update WSL +Run the following command in Powershell or Command Prompt to: +1. Enable the optional WSL and Virtual Machine Platform components. +2. Download and install the latest Linux kernel. +3. Set WSL 2 as the default. +4. Download and install the Ubuntu Linux distribution (a reboot may be required). 
+ +```shell +wsl --install +``` + +For more detailed information and additional steps, refer to [Microsoft's WSL Setup Environment Documentation](https://learn.microsoft.com/en-us/windows/wsl/setup/environment). + +#### Resolve FileNotFoundError or "No such file or directory" Errors +When you run `./run setup`, if you encounter errors like `No such file or directory` or `FileNotFoundError`, it might be because Windows-style line endings (CRLF - Carriage Return Line Feed) are not compatible with Unix/Linux style line endings (LF - Line Feed). + +To resolve this, you can use the `dos2unix` utility to convert the line endings in your script from CRLF to LF. Here’s how to install and run `dos2unix` on the script: + +```shell +sudo apt update +sudo apt install dos2unix +dos2unix ./run +``` + +After executing the above commands, running `./run setup` should work successfully. + +#### Store Project Files within the WSL File System +If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids issues related to path translations and permissions and provides a more consistent development environment. + +You can keep running the command to get feedback on where you are up to with your setup. +When setup has been completed, the command will return an output like this: + +![Setup Complete](docs/content/imgs/quickstart/006_setup_complete.png) + +## Creating Your Agent + +After completing the setup, the next step is to create your agent template. +Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with a name of your choosing. 
+ +Tips for naming your agent: +* Give it its own unique name, or name it after yourself +* Include an important aspect of your agent in the name, such as its purpose + +Examples: `SwiftyosAssistant`, `PwutsPRAgent`, `Narvis`, `evo.ninja` + +![Create an Agent](docs/content/imgs/quickstart/007_create_agent.png) + +### Optional: Entering the Arena + +Entering the Arena is an optional step intended for those who wish to actively participate in the agent leaderboard. If you decide to participate, you can enter the Arena by running `./run arena enter YOUR_AGENT_NAME`. This step is not mandatory for the development or testing of your agent. + +Entries with names like `agent`, `ExampleAgent`, `test_agent` or `MyExampleGPT` will NOT be merged. We also don't accept copycat entries that use the name of other projects, like `AutoGPT` or `evo.ninja`. + +![Enter the Arena](docs/content/imgs/quickstart/008_enter_arena.png) + +> **Note** +> For advanced users, create a new branch and create a file called YOUR_AGENT_NAME.json in the arena directory. Then commit this and create a PR to merge into the main repo. Only single file entries will be permitted. The json file needs the following format: +> ```json +> { +> "github_repo_url": "https://github.com/Swiftyos/YourAgentName", +> "timestamp": "2023-09-18T10:03:38.051498", +> "commit_hash_to_benchmark": "ac36f7bfc7f23ad8800339fa55943c1405d80d5e", +> "branch_to_benchmark": "master" +> } +> ``` +> - `github_repo_url`: the url to your fork +> - `timestamp`: timestamp of the last update of this file +> - `commit_hash_to_benchmark`: the commit hash of your entry. You update each time you have an something ready to be officially entered into the hackathon +> - `branch_to_benchmark`: the branch you are using to develop your agent on, default is master. 
+ + +## Running your Agent + +Your agent can started using the `./run agent start YOUR_AGENT_NAME` + +This start the agent on `http://localhost:8000/` + +![Start the Agent](docs/content/imgs/quickstart/009_start_agent.png) + +The frontend can be accessed from `http://localhost:8000/`, you will first need to login using either a google account or your github account. + +![Login](docs/content/imgs/quickstart/010_login.png) + +Upon logging in you will get a page that looks something like this. With your task history down the left hand side of the page and the 'chat' window to send tasks to your agent. + +![Login](docs/content/imgs/quickstart/011_home.png) + +When you have finished with your agent, or if you just need to restart it, use Ctl-C to end the session then you can re-run the start command. + +If you are having issues and want to ensure the agent has been stopped there is a `./run agent stop` command which will kill the process using port 8000, which should be the agent. + +## Benchmarking your Agent + +The benchmarking system can also be accessed using the cli too: + +```bash +agpt % ./run benchmark +Usage: cli.py benchmark [OPTIONS] COMMAND [ARGS]... + + Commands to start the benchmark and list tests and categories + +Options: + --help Show this message and exit. + +Commands: + categories Benchmark categories group command + start Starts the benchmark command + tests Benchmark tests group command +agpt % ./run benchmark categories +Usage: cli.py benchmark categories [OPTIONS] COMMAND [ARGS]... + + Benchmark categories group command + +Options: + --help Show this message and exit. + +Commands: + list List benchmark categories command +agpt % ./run benchmark tests +Usage: cli.py benchmark tests [OPTIONS] COMMAND [ARGS]... + + Benchmark tests group command + +Options: + --help Show this message and exit. 
+ +Commands: + details Benchmark test details command + list List benchmark tests command +``` + +The benchmark has been split into different categories of skills you can test your agent on. You can see what categories are available with +```bash +./run benchmark categories list +# And what tests are available with +./run benchmark tests list +``` + +![Login](docs/content/imgs/quickstart/012_tests.png) + + +Finally you can run the benchmark with + +```bash +./run benchmark start YOUR_AGENT_NAME + +``` + +> diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000000..4d51706d4d99 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,66 @@ +# Security Policy + + - [**Using AutoGPT Securely**](#using-AutoGPT-securely) + - [Restrict Workspace](#restrict-workspace) + - [Untrusted inputs](#untrusted-inputs) + - [Data privacy](#data-privacy) + - [Untrusted environments or networks](#untrusted-environments-or-networks) + - [Multi-Tenant environments](#multi-tenant-environments) + - [**Reporting a Vulnerability**](#reporting-a-vulnerability) + +## Using AutoGPT Securely + +### Restrict Workspace + +Since agents can read and write files, it is important to keep them restricted to a specific workspace. This happens by default *unless* RESTRICT_TO_WORKSPACE is set to False. + +Disabling RESTRICT_TO_WORKSPACE can increase security risks. However, if you still need to disable it, consider running AutoGPT inside a [sandbox](https://developers.google.com/code-sandboxing), to mitigate some of these risks. + +### Untrusted inputs + +When handling untrusted inputs, it's crucial to isolate the execution and carefully pre-process inputs to mitigate script injection risks. + +For maximum security when handling untrusted inputs, you may need to employ the following: + +* Sandboxing: Isolate the process. +* Updates: Keep your libraries (including AutoGPT) updated with the latest security patches. +* Input Sanitation: Before feeding data to the model, sanitize inputs rigorously. 
This involves techniques such as: + * Validation: Enforce strict rules on allowed characters and data types. + * Filtering: Remove potentially malicious scripts or code fragments. + * Encoding: Convert special characters into safe representations. + * Verification: Run tooling that identifies potential script injections (e.g. [models that detect prompt injection attempts](https://python.langchain.com/docs/guides/safety/hugging_face_prompt_injection)). + +### Data privacy + +To protect sensitive data from potential leaks or unauthorized access, it is crucial to sandbox the agent execution. This means running it in a secure, isolated environment, which helps mitigate many attack vectors. + +### Untrusted environments or networks + +Since AutoGPT performs network calls to the OpenAI API, it is important to always run it with trusted environments and networks. Running it on untrusted environments can expose your API KEY to attackers. +Additionally, running it on an untrusted network can expose your data to potential network attacks. + +However, even when running on trusted networks, it is important to always encrypt sensitive data while sending it over the network. + +### Multi-Tenant environments + +If you intend to run multiple AutoGPT brains in parallel, it is your responsibility to ensure the models do not interact or access each other's data. + +The primary areas of concern are tenant isolation, resource allocation, model sharing and hardware attacks. + +- Tenant Isolation: you must make sure that the tenants run separately to prevent unwanted access to the data from other tenants. Keeping model network traffic separate is also important because you not only prevent unauthorized access to data, but also prevent malicious users or tenants sending prompts to execute under another tenant’s identity. + +- Resource Allocation: a denial of service caused by one tenant can affect the overall system health. 
Implement safeguards like rate limits, access controls, and health monitoring. + +- Data Sharing: in a multi-tenant design with data sharing, ensure tenants and users understand the security risks and sandbox agent execution to mitigate risks. + +- Hardware Attacks: the hardware (GPUs or TPUs) can also be attacked. [Research](https://scholar.google.com/scholar?q=gpu+side+channel) has shown that side channel attacks on GPUs are possible, which can make data leak from other brains or processes running on the same system at the same time. + +## Reporting a Vulnerability + +Beware that none of the topics under [Using AutoGPT Securely](#using-AutoGPT-securely) are considered vulnerabilities on AutoGPT. + +However, If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it as a private [security advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new). + +A team of volunteers on a reasonable-effort basis maintains this project. As such, please give us at least 90 days to work on a fix before public exposure. diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md new file mode 100644 index 000000000000..2b51851bb104 --- /dev/null +++ b/TROUBLESHOOTING.md @@ -0,0 +1,23 @@ +This page is a list of issues you could encounter along with their fixes. + +# Forge +**Poetry configuration invalid** + +The poetry configuration is invalid: +- Additional properties are not allowed ('group' was unexpected) +Screenshot 2023-09-22 at 5 42 59 PM + +**Pydantic Validation Error** + +Remove your sqlite agent.db file. 
it's probably because some of your data is not complying with the new spec (we will create migrations soon to avoid this problem) + + +*Solution* + +Update poetry + +# Benchmark +TODO + +# Frontend +TODO diff --git a/arena/480bot.json b/arena/480bot.json new file mode 100644 index 000000000000..819ac9cc9847 --- /dev/null +++ b/arena/480bot.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/480/AutoGPT", + "timestamp": "2023-10-22T06:49:52.536177", + "commit_hash_to_benchmark": "16e266c65fb4620a1b1397532c503fa426ec191d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AGENT_GORDON.json b/arena/AGENT_GORDON.json new file mode 100644 index 000000000000..98784273f92c --- /dev/null +++ b/arena/AGENT_GORDON.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/filipjakubowski/AutoGPT", + "timestamp": "2023-11-01T17:13:24.272333", + "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AGENT_JARVIS.json b/arena/AGENT_JARVIS.json new file mode 100644 index 000000000000..ac284f6aa1c6 --- /dev/null +++ b/arena/AGENT_JARVIS.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/filipjakubowski/AutoGPT", + "timestamp": "2023-11-04T10:13:11.039444", + "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AI.json b/arena/AI.json new file mode 100644 index 000000000000..a6b27fdb1157 --- /dev/null +++ b/arena/AI.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/QingquanBao/AutoGPT", + "timestamp": "2023-11-01T16:20:51.086235", + "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AKBAgent.json b/arena/AKBAgent.json new file mode 100644 index 000000000000..71e8cea91b0b --- /dev/null +++ 
b/arena/AKBAgent.json @@ -0,0 +1,7 @@ +{ + "github_repo_url": "https://github.com/imakb/AKBAgent", + "timestamp": "2023-10-31T00:03:23.000000", + "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0", + "branch_to_benchmark": "AKBAgent" +} + diff --git a/arena/ASSISTANT.json b/arena/ASSISTANT.json new file mode 100644 index 000000000000..bd0c0f055f88 --- /dev/null +++ b/arena/ASSISTANT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/hongzzz/AutoGPT", + "timestamp": "2023-10-13T03:22:59.347424", + "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AUTO_ENGINEER.json b/arena/AUTO_ENGINEER.json new file mode 100644 index 000000000000..5f8e28c973cf --- /dev/null +++ b/arena/AUTO_ENGINEER.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/kaiomagalhaes/AutoGPT", + "timestamp": "2023-10-04T15:25:30.458687", + "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AUTO_GPT_JON001.json b/arena/AUTO_GPT_JON001.json new file mode 100644 index 000000000000..f36fad390296 --- /dev/null +++ b/arena/AUTO_GPT_JON001.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Jonobinsoftware/AutoGPT-Tutorial", + "timestamp": "2023-10-10T06:01:23.439061", + "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Adtractive_Agent.json b/arena/Adtractive_Agent.json new file mode 100644 index 000000000000..ebec6e6ad4a7 --- /dev/null +++ b/arena/Adtractive_Agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/aivaras-mazylis/AutoGPT", + "timestamp": "2023-10-17T13:16:16.327237", + "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950", + "branch_to_benchmark": "master" +} \ No newline at end of file 
diff --git a/arena/AgGPT.json b/arena/AgGPT.json new file mode 100644 index 000000000000..07751b8ecac6 --- /dev/null +++ b/arena/AgGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/althaf004/AutoGPT", + "timestamp": "2023-09-26T03:40:03.658369", + "commit_hash_to_benchmark": "4a8da53d85d466f2eb325c745a2c03cf88792e7d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AgentJPark.json b/arena/AgentJPark.json new file mode 100644 index 000000000000..636e4d1f79c3 --- /dev/null +++ b/arena/AgentJPark.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/againeureka/AutoGPT", + "timestamp": "2023-10-12T02:20:01.005361", + "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AgentKD.json b/arena/AgentKD.json new file mode 100644 index 000000000000..1aa340eac8e5 --- /dev/null +++ b/arena/AgentKD.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/kitdesai/AgentKD", + "timestamp": "2023-10-14T02:35:09.979434", + "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Ahmad.json b/arena/Ahmad.json new file mode 100644 index 000000000000..2b5b86f12481 --- /dev/null +++ b/arena/Ahmad.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/JawadAbu/AutoGPT.git", + "timestamp": "2023-11-05T12:35:35.352028", + "commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Alfred.json b/arena/Alfred.json new file mode 100644 index 000000000000..be510f1fd414 --- /dev/null +++ b/arena/Alfred.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Shadowless422/Alfred", + "timestamp": "2023-10-03T10:42:45.473477", + "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098", + 
"branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AlphaCISO.json b/arena/AlphaCISO.json new file mode 100644 index 000000000000..06791274b135 --- /dev/null +++ b/arena/AlphaCISO.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/alphaciso/AutoGPT", + "timestamp": "2023-10-21T08:26:41.961187", + "commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AndersLensway.json b/arena/AndersLensway.json new file mode 100644 index 000000000000..6bbf68fdf90e --- /dev/null +++ b/arena/AndersLensway.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/4nd3rs/AutoGPT", + "timestamp": "2023-10-11T11:00:08.150159", + "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AntlerTestGPT.json b/arena/AntlerTestGPT.json new file mode 100644 index 000000000000..9df76d4a8e16 --- /dev/null +++ b/arena/AntlerTestGPT.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/pjw1/AntlerAI", "timestamp": "2023-10-07T11:46:39Z", "commit_hash_to_benchmark": "f81e086e5647370854ec639c531c900775a99207", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/AppleGPT.json b/arena/AppleGPT.json new file mode 100644 index 000000000000..7fe3a7beeea9 --- /dev/null +++ b/arena/AppleGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Nimit3-droid/AutoGPT", + "timestamp": "2023-10-03T11:59:15.495902", + "commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AquaAgent.json b/arena/AquaAgent.json new file mode 100644 index 000000000000..6deb549db137 --- /dev/null +++ b/arena/AquaAgent.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/somnistudio/SomniGPT", "timestamp": "2023-10-06T16:40:14Z", 
"commit_hash_to_benchmark": "47eb5124fa97187d7f3fa4036e422cd771cf0ae7", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/ArtistManagerGPT.json b/arena/ArtistManagerGPT.json new file mode 100644 index 000000000000..881ed049b91f --- /dev/null +++ b/arena/ArtistManagerGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/AmahAjavon/AutoGPT", + "timestamp": "2023-10-28T20:32:15.845741", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AskOpie.json b/arena/AskOpie.json new file mode 100644 index 000000000000..a2f6bd3938a1 --- /dev/null +++ b/arena/AskOpie.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/arunqa/AutoGPT", + "timestamp": "2023-09-26T05:13:24.466017", + "commit_hash_to_benchmark": "4a8da53d85d466f2eb325c745a2c03cf88792e7d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Auto.json b/arena/Auto.json new file mode 100644 index 000000000000..9bad9db50e9d --- /dev/null +++ b/arena/Auto.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Nikhil8652/AutoGPT", + "timestamp": "2023-10-16T09:12:17.452121", + "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AutoGPT-ariel.json b/arena/AutoGPT-ariel.json new file mode 100644 index 000000000000..cefa43620551 --- /dev/null +++ b/arena/AutoGPT-ariel.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/RedTachyon/AutoGPT", + "timestamp": "2023-10-21T22:31:30.871023", + "commit_hash_to_benchmark": "eda21d51921899756bf866cf5c4d0f2dcd3e2e23", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AutoGPT2.json b/arena/AutoGPT2.json new file mode 100644 index 000000000000..11a71f66f04e --- /dev/null +++ b/arena/AutoGPT2.json @@ -0,0 +1 @@ +{"github_repo_url": 
"https://github.com/SarahGrevy/AutoGPT", "timestamp": "2023-10-20T17:21:22Z", "commit_hash_to_benchmark": "32300906c9aafea8c550fa2f9edcc113fbfc512c", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/AutoGenius.json b/arena/AutoGenius.json new file mode 100644 index 000000000000..3974b9dcc8eb --- /dev/null +++ b/arena/AutoGenius.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/JasonDRZ/AutoGPT", + "timestamp": "2023-10-26T13:27:58.805270", + "commit_hash_to_benchmark": "ab2a61833584c42ededa805cbac50718c72aa5ae", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AutoTDD.json b/arena/AutoTDD.json new file mode 100644 index 000000000000..ea61ddd8261e --- /dev/null +++ b/arena/AutoTDD.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/vshneer/AutoTDD", + "timestamp": "2023-10-11T19:14:30.939747", + "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AutoTestGenerator.json b/arena/AutoTestGenerator.json new file mode 100644 index 000000000000..c28d6da87ad3 --- /dev/null +++ b/arena/AutoTestGenerator.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/cagdasbas/AutoGPT", + "timestamp": "2023-10-15T08:43:40.193080", + "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/AwareAgent.json b/arena/AwareAgent.json new file mode 100644 index 000000000000..d4155dd67e9e --- /dev/null +++ b/arena/AwareAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/LuisLechugaRuiz/AwareAgent", + "timestamp": "2023-10-26T10:10:01.481205", + "commit_hash_to_benchmark": "c180063dde49af02ed95ec4c019611da0a5540d7", + "branch_to_benchmark": "master" +} diff --git a/arena/Bagi_agent.json b/arena/Bagi_agent.json new file mode 100644 index 000000000000..4251bb4246c4 --- 
/dev/null +++ b/arena/Bagi_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/xpineda/AutoGPT_xabyvng.git", + "timestamp": "2023-10-20T09:21:48.837635", + "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/BanglaSgAgent.json b/arena/BanglaSgAgent.json new file mode 100644 index 000000000000..12014fe8d058 --- /dev/null +++ b/arena/BanglaSgAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/aniruddha-adhikary/AutoGPT", + "timestamp": "2023-09-27T15:32:24.056105", + "commit_hash_to_benchmark": "6f289e6dfa8246f8993b76c933527f3707b8d7e5", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Baptiste.json b/arena/Baptiste.json new file mode 100644 index 000000000000..691f62952f3d --- /dev/null +++ b/arena/Baptiste.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Baptistecaille/AutoGPT", + "timestamp": "2023-10-01T19:44:23.416591", + "commit_hash_to_benchmark": "3da29eae45683457131ee8736bedae7e2a74fbba", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Bravo06.json b/arena/Bravo06.json new file mode 100644 index 000000000000..21ceec258b6d --- /dev/null +++ b/arena/Bravo06.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/jafar-albadarneh/Bravo06GPT", "timestamp": "2023-10-04T23:01:27Z", "commit_hash_to_benchmark": "f8c177b4b0e4ca45a3a104011b866c0415c648f1", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/Brillante-AI.json b/arena/Brillante-AI.json new file mode 100644 index 000000000000..3c81a02c0d30 --- /dev/null +++ b/arena/Brillante-AI.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/dabeer021/Brillante-AI", "timestamp": "2023-10-02T19:05:04Z", "commit_hash_to_benchmark": "163ab75379e1ee7792f50d4d70a1f482ca9cb6a1", "branch_to_benchmark": "master"} \ No newline at end of file diff --git 
a/arena/Bunny.json b/arena/Bunny.json new file mode 100644 index 000000000000..33c2b0d1a82a --- /dev/null +++ b/arena/Bunny.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/razorhasbeen/AutoGPT", + "timestamp": "2023-10-03T11:50:56.725628", + "commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/CCAgent.json b/arena/CCAgent.json new file mode 100644 index 000000000000..899172e343d1 --- /dev/null +++ b/arena/CCAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ccsnow127/AutoGPT", + "timestamp": "2023-10-21T13:57:15.131761", + "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/CES-GPT.json b/arena/CES-GPT.json new file mode 100644 index 000000000000..016804e65938 --- /dev/null +++ b/arena/CES-GPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ces-sonnguyen/CES-GPT", + "timestamp": "2023-10-30T07:45:07.337258", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/CISLERK.json b/arena/CISLERK.json new file mode 100644 index 000000000000..1370a0a2d30e --- /dev/null +++ b/arena/CISLERK.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/cislerk/AutoGPT", + "timestamp": "2023-10-10T18:40:50.718850", + "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/CONNECTBOT.json b/arena/CONNECTBOT.json new file mode 100644 index 000000000000..b43e147a98b8 --- /dev/null +++ b/arena/CONNECTBOT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/myncow/DocumentAgent.git", + "timestamp": "2023-10-31T21:21:28.951345", + "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0", + 
"branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/CYNO_AGENT.json b/arena/CYNO_AGENT.json new file mode 100644 index 000000000000..288802d5d7dc --- /dev/null +++ b/arena/CYNO_AGENT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/dr1yl/AutoGPT", + "timestamp": "2023-10-09T20:01:05.041446", + "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ChadGPT.json b/arena/ChadGPT.json new file mode 100644 index 000000000000..6a378b1d8f7d --- /dev/null +++ b/arena/ChadGPT.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/Ahmad-Alaziz/ChadGPT", "timestamp": "2023-10-26T09:39:35Z", "commit_hash_to_benchmark": "84dd029c011379791a6fec8b148b2982a2ef159e", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/ChrisGPT.json b/arena/ChrisGPT.json new file mode 100644 index 000000000000..6ec46681e366 --- /dev/null +++ b/arena/ChrisGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/darkcyber-ninja/AutoGPT", + "timestamp": "2023-10-31T17:55:41.458834", + "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/CodeAutoGPT.json b/arena/CodeAutoGPT.json new file mode 100644 index 000000000000..1780a4966ceb --- /dev/null +++ b/arena/CodeAutoGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/hugomastromauro/AutoGPT", + "timestamp": "2023-11-01T13:21:42.624202", + "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/CreaitorMarketing.json b/arena/CreaitorMarketing.json new file mode 100644 index 000000000000..38ffca0f8304 --- /dev/null +++ b/arena/CreaitorMarketing.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/simonfunk/Auto-GPT", "timestamp": 
"2023-10-08T02:10:18Z", "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/CurieAssistant.json b/arena/CurieAssistant.json new file mode 100644 index 000000000000..bdbd14c9c06d --- /dev/null +++ b/arena/CurieAssistant.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/HMDCrew/AutoGPT", + "timestamp": "2023-10-06T20:41:26.293944", + "commit_hash_to_benchmark": "9e353e09b5df39d4d410bef57cf17387331e96f6", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/DE.json b/arena/DE.json new file mode 100644 index 000000000000..fcea35c9d3f6 --- /dev/null +++ b/arena/DE.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/wic0144/AutoGPT", + "timestamp": "2023-10-26T09:05:21.013962", + "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/DavidsAgent.json b/arena/DavidsAgent.json new file mode 100644 index 000000000000..f824fd14dc93 --- /dev/null +++ b/arena/DavidsAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/beisdog/AutoGPT", + "timestamp": "2023-09-29T22:06:18.846082", + "commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Derpmaster.json b/arena/Derpmaster.json new file mode 100644 index 000000000000..6a4e159e5370 --- /dev/null +++ b/arena/Derpmaster.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/schumacher-m/Derpmaster", + "timestamp": "2023-10-30T21:10:27.407732", + "commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/DevOpsAgent.json b/arena/DevOpsAgent.json new file mode 100644 index 000000000000..6f3384cd64d3 --- /dev/null +++ b/arena/DevOpsAgent.json @@ -0,0 +1,6 @@ 
+{ + "github_repo_url": "https://github.com/rahuldotar/AutoGPT", + "timestamp": "2023-10-02T11:34:29.870077", + "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Drench.json b/arena/Drench.json new file mode 100644 index 000000000000..49417551e2af --- /dev/null +++ b/arena/Drench.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/MohamedBasueny/AutoGPT-Drench", + "timestamp": "2023-10-27T01:28:13.869318", + "commit_hash_to_benchmark": "21b809794a90cf6f9a6aa41f179f420045becadc", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Eduardo.json b/arena/Eduardo.json new file mode 100644 index 000000000000..dfffd902d869 --- /dev/null +++ b/arena/Eduardo.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/MuriloEduardo/AutoGPT.git", + "timestamp": "2023-09-25T03:18:20.659056", + "commit_hash_to_benchmark": "ffa76c3a192c36827669335de4390262da5fd972", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/EmbeddedAg.json b/arena/EmbeddedAg.json new file mode 100644 index 000000000000..b26355e38e25 --- /dev/null +++ b/arena/EmbeddedAg.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/Significant-Gravitas/AutoGPT", "timestamp": "2023-10-26T09:15:50Z", "commit_hash_to_benchmark": "6c9152a95c8994898c47c85ea90ba58e0cc02c28", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/EnglishTestpaperAgent.json b/arena/EnglishTestpaperAgent.json new file mode 100644 index 000000000000..7271eb0c9ca6 --- /dev/null +++ b/arena/EnglishTestpaperAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/kyannai/AutoGPT", + "timestamp": "2023-09-29T03:05:45.504690", + "commit_hash_to_benchmark": "1f367618edf903f38dff4dd064f96e611ffc5242", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ExampleAgent.json 
b/arena/ExampleAgent.json new file mode 100644 index 000000000000..2fb8c44a3c11 --- /dev/null +++ b/arena/ExampleAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/janekdijkstra/AutoGPT", + "timestamp": "2023-10-16T12:12:54.998033", + "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/FLASH.json b/arena/FLASH.json new file mode 100644 index 000000000000..7cce9c10e3f7 --- /dev/null +++ b/arena/FLASH.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/flashdumper/AutoGPT", + "timestamp": "2023-10-30T23:02:13.653861", + "commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/FactoryGPT.json b/arena/FactoryGPT.json new file mode 100644 index 000000000000..e66434c3961d --- /dev/null +++ b/arena/FactoryGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/neilmartindev/FactoryGPT", + "timestamp": "2023-10-04T16:24:58.525870", + "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/FcsummerGPT.json b/arena/FcsummerGPT.json new file mode 100644 index 000000000000..2f2eb88fa59f --- /dev/null +++ b/arena/FcsummerGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/fbk111/FcsummerGPT", + "timestamp": "2023-10-25T09:58:39.801277", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/FynAgent.json b/arena/FynAgent.json new file mode 100644 index 000000000000..1f006e63ea9d --- /dev/null +++ b/arena/FynAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/tomkat-cr/AutoGPT.git", + "timestamp": "2023-10-18T09:41:21.282992", + "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", 
+ "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/GG.json b/arena/GG.json new file mode 100644 index 000000000000..78421b484996 --- /dev/null +++ b/arena/GG.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/IgorCIs/AutoGPT", + "timestamp": "2023-09-27T14:01:20.964953", + "commit_hash_to_benchmark": "a14aadd91493886663232bfd23c0412609f2a2fc", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/GPTTest.json b/arena/GPTTest.json new file mode 100644 index 000000000000..e2c1c0af37b2 --- /dev/null +++ b/arena/GPTTest.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/h3llix/GPTTest.git", + "timestamp": "2023-11-02T10:56:53.142288", + "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/GameSoundGPT.json b/arena/GameSoundGPT.json new file mode 100644 index 000000000000..66fe962ab2a6 --- /dev/null +++ b/arena/GameSoundGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/mordvinov/AutoGPT", + "timestamp": "2023-10-13T14:48:02.852293", + "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/GeorgeGPT.json b/arena/GeorgeGPT.json new file mode 100644 index 000000000000..83ce96df7385 --- /dev/null +++ b/arena/GeorgeGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/norn93/GeorgeGPT", + "timestamp": "2023-10-17T14:38:41.051458", + "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Granger.json b/arena/Granger.json new file mode 100644 index 000000000000..203e99c34433 --- /dev/null +++ b/arena/Granger.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/balloch/AutoGPTProblemSolver", + "timestamp": "2023-09-29T15:11:44.876627", + 
"commit_hash_to_benchmark": "9fb6d5bbbd6928402a5718b8c249811c6f682a88", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/HACKATHON.json b/arena/HACKATHON.json new file mode 100644 index 000000000000..7f29e7582d5d --- /dev/null +++ b/arena/HACKATHON.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/manuel-soria/AutoGPT", + "timestamp": "2023-10-07T16:55:38.741776", + "commit_hash_to_benchmark": "a00d880a3fd62373f53a0b0a45c9dcfdb45968e4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/HMD2.json b/arena/HMD2.json new file mode 100644 index 000000000000..5ef36bd18af0 --- /dev/null +++ b/arena/HMD2.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/HMDCrew/AutoGPT", + "timestamp": "2023-10-09T08:46:37.457740", + "commit_hash_to_benchmark": "9e353e09b5df39d4d410bef57cf17387331e96f6", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Heisenberg.json b/arena/Heisenberg.json new file mode 100644 index 000000000000..a77ce87d775c --- /dev/null +++ b/arena/Heisenberg.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/georgehaws/Heisenberg", + "timestamp": "2023-10-02T16:07:18-07:00", + "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098", + "branch_to_benchmark": "master" +} diff --git a/arena/HekolcuAutoGPT.json b/arena/HekolcuAutoGPT.json new file mode 100644 index 000000000000..e64dd9c632fe --- /dev/null +++ b/arena/HekolcuAutoGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/hekolcu/AutoGPT", + "timestamp": "2023-09-30T17:31:20.979122", + "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/HuitzilAiAgent.json b/arena/HuitzilAiAgent.json new file mode 100644 index 000000000000..6e832eafa2af --- /dev/null +++ b/arena/HuitzilAiAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": 
"https://github.com/codetitlan/AutoGPT-CDTHB", + "timestamp": "2023-10-03T15:04:54.856291", + "commit_hash_to_benchmark": "3374fd181852d489e51ee33a25d12a064a0bb55d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Hypeman.json b/arena/Hypeman.json new file mode 100644 index 000000000000..d32bcb9e483d --- /dev/null +++ b/arena/Hypeman.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/kennyu/KenGPT", + "timestamp": "2023-09-27T19:50:31.443494", + "commit_hash_to_benchmark": "cf630e4f2cee04fd935612f95308322cd9eb1df7", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/IncredibubbleTea.json b/arena/IncredibubbleTea.json new file mode 100644 index 000000000000..6908e6be2c84 --- /dev/null +++ b/arena/IncredibubbleTea.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/mariepop13/AutoGPT", + "timestamp": "2023-10-25T18:38:32.012583", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/JackGPT.json b/arena/JackGPT.json new file mode 100644 index 000000000000..007286814efa --- /dev/null +++ b/arena/JackGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/JackDance/AutoGPT", + "timestamp": "2023-10-09T08:26:35.181112", + "commit_hash_to_benchmark": "f77d383a9f5e66a35d6008bd43cab4d93999cb61", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Jarvis.json b/arena/Jarvis.json new file mode 100644 index 000000000000..bb098270eca3 --- /dev/null +++ b/arena/Jarvis.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/elynch303/AutoGPT", + "timestamp": "2023-10-12T14:15:17.014333", + "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/JarvisAgent.json b/arena/JarvisAgent.json new file mode 100644 index 
000000000000..f8cc9810f326 --- /dev/null +++ b/arena/JarvisAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/JadeCong/AutoGPT", + "timestamp": "2023-10-17T18:49:16.489653", + "commit_hash_to_benchmark": "0bd5d4420ec168194d5a93f62d890d33ab7d9940", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Jean-Michel.json b/arena/Jean-Michel.json new file mode 100644 index 000000000000..30791d295c41 --- /dev/null +++ b/arena/Jean-Michel.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Yanniswein/Jean-Michel", + "timestamp": "2023-10-30T09:21:14.984080", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Job_GPT.json b/arena/Job_GPT.json new file mode 100644 index 000000000000..de73fba89887 --- /dev/null +++ b/arena/Job_GPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/SeaField-dev/AutoGPT.git", + "timestamp": "2023-09-25T09:35:03.022273", + "commit_hash_to_benchmark": "a09d2a581f7b435ea55aa32a5fc7bbb093f4d021", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/JoshAgent1.json b/arena/JoshAgent1.json new file mode 100644 index 000000000000..99378066ae72 --- /dev/null +++ b/arena/JoshAgent1.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/joshyorko/AutoGPT", + "timestamp": "2023-09-28T17:05:27.689905", + "commit_hash_to_benchmark": "959e1304d11f126c5a6914c3bb886549638d6b35", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/KnowledgeExtractor.json b/arena/KnowledgeExtractor.json new file mode 100644 index 000000000000..4a184f2fb5ba --- /dev/null +++ b/arena/KnowledgeExtractor.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/arromaljj/KnowledgeExtractor", + "timestamp": "2023-10-04T13:01:50.037123", + "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065", + 
"branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/LAWYER_EMAD.json b/arena/LAWYER_EMAD.json new file mode 100644 index 000000000000..5d84d0872c49 --- /dev/null +++ b/arena/LAWYER_EMAD.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/emads7/AutoGPT.git", + "timestamp": "2023-10-19T15:06:37.481038", + "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/LHRobot.json b/arena/LHRobot.json new file mode 100644 index 000000000000..98feac3b9220 --- /dev/null +++ b/arena/LHRobot.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/LH-Enterprise/AutoGPT", + "timestamp": "2023-10-07T01:05:31.627432", + "commit_hash_to_benchmark": "b2d53d8d18c754a5b877ffeb9f42d3387c3324fd", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Lab49Agent.json b/arena/Lab49Agent.json new file mode 100644 index 000000000000..cbb9922645db --- /dev/null +++ b/arena/Lab49Agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/FutureProofTechnology/AutoGPT", + "timestamp": "2023-10-12T10:28:34.275827", + "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/LbAgent.json b/arena/LbAgent.json new file mode 100644 index 000000000000..8ff9c0cc099e --- /dev/null +++ b/arena/LbAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/BiaoLiu2017/AutoGPT", + "timestamp": "2023-10-30T10:20:40.082545", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/LegalAgent.json b/arena/LegalAgent.json new file mode 100644 index 000000000000..c57b30f85275 --- /dev/null +++ b/arena/LegalAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/gengrui1983/LegalGPT", + "timestamp": 
"2023-10-25T02:46:41.860987", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Light_Agent.json b/arena/Light_Agent.json new file mode 100644 index 000000000000..17fee68be6f1 --- /dev/null +++ b/arena/Light_Agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/mohammed-radha-LightRing/AutoGPT", + "timestamp": "2023-10-01T07:10:46.497391", + "commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/LinuzGPT.json b/arena/LinuzGPT.json new file mode 100644 index 000000000000..8cb096f0cf21 --- /dev/null +++ b/arena/LinuzGPT.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/linusaltacc/AutoGPT", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/Lirum.json b/arena/Lirum.json new file mode 100644 index 000000000000..da8dddd76a74 --- /dev/null +++ b/arena/Lirum.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/rogerioth/AutoGPT", + "timestamp": "2023-10-12T23:04:51.600862", + "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MANU.json b/arena/MANU.json new file mode 100644 index 000000000000..7e1caed1f20c --- /dev/null +++ b/arena/MANU.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/manuel-soria/AutoGPT", + "timestamp": "2023-10-07T16:50:11.634586", + "commit_hash_to_benchmark": "a00d880a3fd62373f53a0b0a45c9dcfdb45968e4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MEGATRON.json b/arena/MEGATRON.json new file mode 100644 index 000000000000..81182c372e06 --- /dev/null +++ b/arena/MEGATRON.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": 
"https://github.com/razorhasbeen/AutoGPT", + "timestamp": "2023-10-03T11:33:22.091896", + "commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MOBILE.json b/arena/MOBILE.json new file mode 100644 index 000000000000..13b9c175217b --- /dev/null +++ b/arena/MOBILE.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/nel349/AutoGPT", + "timestamp": "2023-10-08T03:10:40.860972", + "commit_hash_to_benchmark": "683257b697392e5551fb86c81a72728029d12aa0", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Maharathi.json b/arena/Maharathi.json new file mode 100644 index 000000000000..c2a312c8205a --- /dev/null +++ b/arena/Maharathi.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/sampatkalyan/AutoGPTHackathon", "timestamp": "2023-10-02T08:16:27Z", "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/MangoAI.json b/arena/MangoAI.json new file mode 100644 index 000000000000..32250c07fe7a --- /dev/null +++ b/arena/MangoAI.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/stargatejy/MangoAI", + "timestamp": "2023-10-24T10:11:38.967772", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MangoAgent-3.json b/arena/MangoAgent-3.json new file mode 100644 index 000000000000..72f5a832577c --- /dev/null +++ b/arena/MangoAgent-3.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/stargatejy/MangoAI", + "timestamp": "2023-10-25T15:41:17.652038", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MangoAgent-4.json b/arena/MangoAgent-4.json new file mode 100644 index 000000000000..b49ad87078b3 --- 
/dev/null +++ b/arena/MangoAgent-4.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/stargatejy/MangoAI", + "timestamp": "2023-10-27T16:28:23.804390", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MarketResearcherEduRob.json b/arena/MarketResearcherEduRob.json new file mode 100644 index 000000000000..6ee0afb41c66 --- /dev/null +++ b/arena/MarketResearcherEduRob.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/fzoric8/AutoGPT", + "timestamp": "2023-11-01T09:36:16.357944", + "commit_hash_to_benchmark": "bc61ea35b5a52cc948657aac0ed8fc3f3191ec04", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Marx.json b/arena/Marx.json new file mode 100644 index 000000000000..69421b46829d --- /dev/null +++ b/arena/Marx.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/maxletemple/AutoGPT", + "timestamp": "2023-10-18T17:06:20.575710", + "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Mary.json b/arena/Mary.json new file mode 100644 index 000000000000..a47a8da58441 --- /dev/null +++ b/arena/Mary.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/bigfatball/Auto-GPT.git", + "timestamp": "2023-10-22T23:40:22.765334", + "commit_hash_to_benchmark": "16e266c65fb4620a1b1397532c503fa426ec191d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Melang.json b/arena/Melang.json new file mode 100644 index 000000000000..5345ede6374a --- /dev/null +++ b/arena/Melang.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Brian-Mwangi-developer/AutoGPT.git", + "timestamp": "2023-10-06T08:50:14.080962", + "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", + "branch_to_benchmark": "master" +} \ No newline at end of file diff 
--git a/arena/Miao.json b/arena/Miao.json new file mode 100644 index 000000000000..f3a169e49841 --- /dev/null +++ b/arena/Miao.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/StefanWan-Durham/AutoGPT.git", + "timestamp": "2023-10-02T15:05:19.789945", + "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MindwareGPT.json b/arena/MindwareGPT.json new file mode 100644 index 000000000000..1be44df5dd12 --- /dev/null +++ b/arena/MindwareGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/0xtotaylor/MindwareGPT.git", + "timestamp": "2023-10-03T14:56:05.228408", + "commit_hash_to_benchmark": "3374fd181852d489e51ee33a25d12a064a0bb55d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Mira.json b/arena/Mira.json new file mode 100644 index 000000000000..28585c526759 --- /dev/null +++ b/arena/Mira.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/enricofranke/EnricoAssistant", + "timestamp": "2023-10-25T23:21:35.799138", + "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MoTS.json b/arena/MoTS.json new file mode 100644 index 000000000000..efad4ea97f67 --- /dev/null +++ b/arena/MoTS.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ghd9201/AutoGPT.git", + "timestamp": "2023-10-25T09:04:02.534683", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MojoBurrito.json b/arena/MojoBurrito.json new file mode 100644 index 000000000000..b9c0ad78081e --- /dev/null +++ b/arena/MojoBurrito.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/dawnkelly09/MojoBurrito", + "timestamp": "2023-10-01T20:24:10.596062", + "commit_hash_to_benchmark": 
"de3e9e702a988c6028cc8b873aeffc9d5d82c572", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MyAgent.json b/arena/MyAgent.json new file mode 100644 index 000000000000..d6f92e188298 --- /dev/null +++ b/arena/MyAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/AgentService/AutoGPT", + "timestamp": "2023-10-25T20:11:31.811596", + "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MyExample.json b/arena/MyExample.json new file mode 100644 index 000000000000..508515aed709 --- /dev/null +++ b/arena/MyExample.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/gabenitez/AutoGPT", + "timestamp": "2023-10-19T22:00:47.453159", + "commit_hash_to_benchmark": "b4588f6425912316e1512391e4392ca30d61e144", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MyExampleAgent.json b/arena/MyExampleAgent.json new file mode 100644 index 000000000000..cc3a9f86b7ef --- /dev/null +++ b/arena/MyExampleAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/moizsajid/AutoGPT", + "timestamp": "2023-10-25T20:20:04.910747", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MyFirstAgent.json b/arena/MyFirstAgent.json new file mode 100644 index 000000000000..783c90f5477d --- /dev/null +++ b/arena/MyFirstAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/smaxaddington/AutoGPT", + "timestamp": "2023-10-14T15:27:15.090035", + "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MyFistAgent.json b/arena/MyFistAgent.json new file mode 100644 index 000000000000..baafc39a876c --- /dev/null +++ b/arena/MyFistAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": 
"https://github.com/eslam-fakhry/AutoGPT", + "timestamp": "2023-11-02T10:19:58.187866", + "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/MyTestAgent.json b/arena/MyTestAgent.json new file mode 100644 index 000000000000..a4c28dc7e8fb --- /dev/null +++ b/arena/MyTestAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Penguin-N/AutoGPT.git", + "timestamp": "2023-10-18T14:01:28.986850", + "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/N.json b/arena/N.json new file mode 100644 index 000000000000..1d8b2dd9f11a --- /dev/null +++ b/arena/N.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/gentaag/AutoGPT", + "timestamp": "2023-10-28T15:16:15.189228", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/NASAssistant2.json b/arena/NASAssistant2.json new file mode 100644 index 000000000000..1359a3332975 --- /dev/null +++ b/arena/NASAssistant2.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/IHIaadj/AutoGPT", + "timestamp": "2023-10-07T22:06:59.410391", + "commit_hash_to_benchmark": "7a33af387e6959506eb8f01b49d296defe587e6d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/NHAN_BOT.json b/arena/NHAN_BOT.json new file mode 100644 index 000000000000..a0e649b0842f --- /dev/null +++ b/arena/NHAN_BOT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Vannhanhk12/AutoGPT", + "timestamp": "2023-09-28T07:18:38.959135", + "commit_hash_to_benchmark": "a555e936c48bca8c794c7116d62a91628e59ac14", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/NadeemAgent.json b/arena/NadeemAgent.json new file mode 100644 index 000000000000..9898b7c19323 
--- /dev/null +++ b/arena/NadeemAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/kiyanwang/AutoGPT", + "timestamp": "2023-10-19T14:11:40.660035", + "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/NanAutoGPT.json b/arena/NanAutoGPT.json new file mode 100644 index 000000000000..8dd47a13047a --- /dev/null +++ b/arena/NanAutoGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/open-nan/NanAutoGPT", + "timestamp": "2023-10-30T10:25:02.617275", + "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/NoobSupreme.json b/arena/NoobSupreme.json new file mode 100644 index 000000000000..42208e3d9bdd --- /dev/null +++ b/arena/NoobSupreme.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Ch0daboy/NoobSupreme.git", + "timestamp": "2023-10-01T08:08:13.753099", + "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/NumberOne.json b/arena/NumberOne.json new file mode 100644 index 000000000000..36c626ca0e03 --- /dev/null +++ b/arena/NumberOne.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/micwin/AutoGPT", + "timestamp": "2023-10-05T17:01:11.784397", + "commit_hash_to_benchmark": "3b7d83a1a6d3fef1d415bfd1d4ba32ca1ba797cc", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Orange.json b/arena/Orange.json new file mode 100644 index 000000000000..4a344241a6d9 --- /dev/null +++ b/arena/Orange.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/lewispeel/AutoGPT", + "timestamp": "2023-10-27T22:57:16.348948", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git 
a/arena/PAgentAI.json b/arena/PAgentAI.json new file mode 100644 index 000000000000..55e7333e7026 --- /dev/null +++ b/arena/PAgentAI.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/plopez10/GPT", + "timestamp": "2023-10-26T03:25:27.221299", + "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Pacific.json b/arena/Pacific.json new file mode 100644 index 000000000000..f7f8d5a3a9c4 --- /dev/null +++ b/arena/Pacific.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/yifeng-qiu/AutoGPTAgent", + "timestamp": "2023-10-04T18:25:34.925806", + "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ParalegalAgent.json b/arena/ParalegalAgent.json new file mode 100644 index 000000000000..92e4c2513542 --- /dev/null +++ b/arena/ParalegalAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/bRitch022/Auto-GPT", + "timestamp": "2023-10-06T18:48:23.644236", + "commit_hash_to_benchmark": "47eb5124fa97187d7f3fa4036e422cd771cf0ae7", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Pelle.json b/arena/Pelle.json new file mode 100644 index 000000000000..598c0708d2e0 --- /dev/null +++ b/arena/Pelle.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/pilotniq/AutoGPT", + "timestamp": "2023-10-23T19:14:27.176891", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Portalen.json b/arena/Portalen.json new file mode 100644 index 000000000000..6e4aa879f45d --- /dev/null +++ b/arena/Portalen.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/erlendjones/AutoGPT", "timestamp": "2023-09-22T20:39:08Z", "commit_hash_to_benchmark": "58d5b0d4a2fcc1bc12ed667db9d62a427a89c1a4", 
"branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/Pumu2_agent.json b/arena/Pumu2_agent.json new file mode 100644 index 000000000000..52510f0b035f --- /dev/null +++ b/arena/Pumu2_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/xpineda/AutoGPT_xabyvng.git", + "timestamp": "2023-10-20T09:26:07.885410", + "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Q.json b/arena/Q.json new file mode 100644 index 000000000000..9fad0c9cf8de --- /dev/null +++ b/arena/Q.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/CopsGit/AutoGPT", + "timestamp": "2023-10-27T19:07:51.053794", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/QA_AGENT.json b/arena/QA_AGENT.json new file mode 100644 index 000000000000..14816293f854 --- /dev/null +++ b/arena/QA_AGENT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ada-lovecraft/Ada-GPT", + "timestamp": "2023-09-20T08:14:19.186952", + "commit_hash_to_benchmark": "377d0af228bad019be0a9743c2824c033e039654", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/QuantumQuill.json b/arena/QuantumQuill.json new file mode 100644 index 000000000000..32e78e5eac8a --- /dev/null +++ b/arena/QuantumQuill.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/dleidisch/AutoAgent", + "timestamp": "2023-10-23T18:49:58.499309", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/RAGOptimizer.json b/arena/RAGOptimizer.json new file mode 100644 index 000000000000..f87cc692a9f7 --- /dev/null +++ b/arena/RAGOptimizer.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/nel349/AutoGPT", + "timestamp": 
"2023-10-07T22:51:51.507768", + "commit_hash_to_benchmark": "683257b697392e5551fb86c81a72728029d12aa0", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/RFPScanner.json b/arena/RFPScanner.json new file mode 100644 index 000000000000..bc4ba260d79e --- /dev/null +++ b/arena/RFPScanner.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/vidhatanand/AutoRFP", + "timestamp": "2023-10-09T12:37:08.692968", + "commit_hash_to_benchmark": "f77d383a9f5e66a35d6008bd43cab4d93999cb61", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/RONNIN.json b/arena/RONNIN.json new file mode 100644 index 000000000000..5e1b0ecc8acc --- /dev/null +++ b/arena/RONNIN.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Huarada/AutoGPT", + "timestamp": "2023-10-06T18:11:56.450481", + "commit_hash_to_benchmark": "a55ed27679f608003372feb9eb61f0104ca87858", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/RagsToRiches.json b/arena/RagsToRiches.json new file mode 100644 index 000000000000..7a3669733cab --- /dev/null +++ b/arena/RagsToRiches.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/faichele/AutoGPT", + "timestamp": "2023-09-28T11:01:12.962590", + "commit_hash_to_benchmark": "4f15b1c5825b3f044c901995e3399d4eacf7ec66", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/RandomVampirePictureBot.json b/arena/RandomVampirePictureBot.json new file mode 100644 index 000000000000..0c8b8dc48bcb --- /dev/null +++ b/arena/RandomVampirePictureBot.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/KleinerCodeDrago/AutoGPT", + "timestamp": "2023-09-29T14:06:38.055747", + "commit_hash_to_benchmark": "76c321d6b1a3c6ed938c90149a2954b7dade761a", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Raslebot.json b/arena/Raslebot.json new file mode 100644 index 000000000000..11169825d966 --- 
/dev/null +++ b/arena/Raslebot.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Jimcau/AutoGPT.git", + "timestamp": "2023-10-16T10:50:47.524483", + "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ResearchAgent.json b/arena/ResearchAgent.json new file mode 100644 index 000000000000..94855c1ceaa3 --- /dev/null +++ b/arena/ResearchAgent.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/Umar-Azam/AutoGPT-ResearchAgent", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/RosterAgent.json b/arena/RosterAgent.json new file mode 100644 index 000000000000..172d48e27773 --- /dev/null +++ b/arena/RosterAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ricowong0730/AutoGPT", + "timestamp": "2023-10-17T01:17:01.540294", + "commit_hash_to_benchmark": "265255120b1a64d1dd0a3a92ae3a7e697a103ecb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/SaasWebDev.json b/arena/SaasWebDev.json new file mode 100644 index 000000000000..98324aa371c8 --- /dev/null +++ b/arena/SaasWebDev.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/giggei/AutoGPT", + "timestamp": "2023-10-02T15:44:54.390181", + "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/SaveAsPDF2.json b/arena/SaveAsPDF2.json new file mode 100644 index 000000000000..6024d173b95a --- /dev/null +++ b/arena/SaveAsPDF2.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/LFarmbot/AutoFarm", + "timestamp": "2023-10-28T04:32:40.914756", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git 
a/arena/ShiviBot.json b/arena/ShiviBot.json new file mode 100644 index 000000000000..c9ce171beeba --- /dev/null +++ b/arena/ShiviBot.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/kshivang/DabblerGPT", + "timestamp": "2023-10-07T01:30:06.292423", + "commit_hash_to_benchmark": "b2d53d8d18c754a5b877ffeb9f42d3387c3324fd", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/SkorkobaniecAgent.json b/arena/SkorkobaniecAgent.json new file mode 100644 index 000000000000..7b99a9e78d43 --- /dev/null +++ b/arena/SkorkobaniecAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/RafalSkorka/AutoGPT", + "timestamp": "2023-10-30T19:05:24.676797", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/SmartAgent.json b/arena/SmartAgent.json new file mode 100644 index 000000000000..bc2f1563e8a3 --- /dev/null +++ b/arena/SmartAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/AgentService/AutoGPT", + "timestamp": "2023-10-25T20:06:46.743984", + "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/SmartGPT.json b/arena/SmartGPT.json new file mode 100644 index 000000000000..fb27875a23f9 --- /dev/null +++ b/arena/SmartGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Mertkaann/AutoGPT.git", + "timestamp": "2023-09-29T21:46:29.940080", + "commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/SouAgent.json b/arena/SouAgent.json new file mode 100644 index 000000000000..6a35c3699078 --- /dev/null +++ b/arena/SouAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/SouSingh/AutoGPT.git", + "timestamp": "2023-10-01T07:26:31.428044", + "commit_hash_to_benchmark": 
"a0fba5d1f13d35a1c4a8b7718550677bf62b5101", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Stragegy_Steve.json b/arena/Stragegy_Steve.json new file mode 100644 index 000000000000..fc4aa7aaea42 --- /dev/null +++ b/arena/Stragegy_Steve.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/talumo/AutoGPT", + "timestamp": "2023-09-28T14:31:36.771515", + "commit_hash_to_benchmark": "e374e516633b0afca1ab644b378fe1973c455782", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Susan.json b/arena/Susan.json new file mode 100644 index 000000000000..4689ef84e2b2 --- /dev/null +++ b/arena/Susan.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/CodeZeno/Susan", + "timestamp": "2023-11-03T11:29:28.704822", + "commit_hash_to_benchmark": "82fecfae1b4fb5d64050eefa77d8f028292aa8f3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/TEST_TPK.json b/arena/TEST_TPK.json new file mode 100644 index 000000000000..ec2967892521 --- /dev/null +++ b/arena/TEST_TPK.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/seeseesky/AutoGPT", + "timestamp": "2023-10-31T04:31:39.337182", + "commit_hash_to_benchmark": "c3569d1842e6568ab1327e577603e71ad1feb622", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/TLGPT.json b/arena/TLGPT.json new file mode 100644 index 000000000000..a402fcc6a02b --- /dev/null +++ b/arena/TLGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/TheoLawrence86/AutoGPT", + "timestamp": "2023-10-09T14:34:30.182635", + "commit_hash_to_benchmark": "f77d383a9f5e66a35d6008bd43cab4d93999cb61", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/TMarafon.json b/arena/TMarafon.json new file mode 100644 index 000000000000..9828a895bc64 --- /dev/null +++ b/arena/TMarafon.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/TMarafon/AutoGPT", + 
"timestamp": "2023-10-28T05:34:54.785662", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/TRAVIS.json b/arena/TRAVIS.json new file mode 100644 index 000000000000..0e73f8841ca4 --- /dev/null +++ b/arena/TRAVIS.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/tskaggs/AutoGPT", + "timestamp": "2023-10-14T02:33:28.089406", + "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/TeslaBot.json b/arena/TeslaBot.json new file mode 100644 index 000000000000..e55ae0cd3f85 --- /dev/null +++ b/arena/TeslaBot.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/momokrunic/AutoGPT", + "timestamp": "2023-11-02T17:17:06.663164", + "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Tessa_AutoGPT_agent.json b/arena/Tessa_AutoGPT_agent.json new file mode 100644 index 000000000000..3f12f4959666 --- /dev/null +++ b/arena/Tessa_AutoGPT_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/DelicaTessa/AutoGPT_hackathon", + "timestamp": "2023-10-03T14:10:19.975796", + "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/TestLbAgent.json b/arena/TestLbAgent.json new file mode 100644 index 000000000000..9c57304508cb --- /dev/null +++ b/arena/TestLbAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/BiaoLiu2017/AutoGPT", + "timestamp": "2023-10-31T03:25:23.064470", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/TheAgency.json b/arena/TheAgency.json new file mode 100644 index 000000000000..8470fe1b9982 --- 
/dev/null +++ b/arena/TheAgency.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/shamantechnology/TheAgency", "timestamp": "2023-10-26T09:22:18Z", "commit_hash_to_benchmark": "3eef81f2579e3ab4822fb9155ee412c597fda9c2", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/TheAgent.json b/arena/TheAgent.json new file mode 100644 index 000000000000..4a515aaa1013 --- /dev/null +++ b/arena/TheAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/BiaoLiu2017/AutoGPT", + "timestamp": "2023-10-31T03:07:04.629241", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/TraceLLMAgent.json b/arena/TraceLLMAgent.json new file mode 100644 index 000000000000..d25ff491b0d5 --- /dev/null +++ b/arena/TraceLLMAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/vmwsree/AutoGPT", + "timestamp": "2023-10-15T21:48:38.027553", + "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/UGYUJI.json b/arena/UGYUJI.json new file mode 100644 index 000000000000..2d0abc304080 --- /dev/null +++ b/arena/UGYUJI.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ugyuji/AutoGPT", + "timestamp": "2023-10-20T04:42:28.397067", + "commit_hash_to_benchmark": "052802ff8d9354f23620eb8b6a5fd68cda7e5c0e", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/UTC-Crew.json b/arena/UTC-Crew.json new file mode 100644 index 000000000000..832d484f1b56 --- /dev/null +++ b/arena/UTC-Crew.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/isayahc/AutoGPT.git", + "timestamp": "2023-10-04T17:06:48.154911", + "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/UmaruAgent.json 
b/arena/UmaruAgent.json new file mode 100644 index 000000000000..f3168d47a817 --- /dev/null +++ b/arena/UmaruAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/g1331/Auto-GPT", + "timestamp": "2023-10-16T13:51:10.464650", + "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/UniAgent.json b/arena/UniAgent.json new file mode 100644 index 000000000000..19d710fa21bf --- /dev/null +++ b/arena/UniAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/JovanKanevche/AutoGPT", + "timestamp": "2023-10-19T17:04:49.626683", + "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Verkiezingsprogrammas.json b/arena/Verkiezingsprogrammas.json new file mode 100644 index 000000000000..4a18be40c74e --- /dev/null +++ b/arena/Verkiezingsprogrammas.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/bergje0810/AutoGPT", + "timestamp": "2023-10-11T11:47:16.993332", + "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/WRITER.json b/arena/WRITER.json new file mode 100644 index 000000000000..63849f43f4fc --- /dev/null +++ b/arena/WRITER.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ezirmusitua/AutoGPT", + "timestamp": "2023-10-27T09:43:35.725996", + "commit_hash_to_benchmark": "21b809794a90cf6f9a6aa41f179f420045becadc", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/WYC.json b/arena/WYC.json new file mode 100644 index 000000000000..0620b0aab264 --- /dev/null +++ b/arena/WYC.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/131250208/AutoGPT_YC", + "timestamp": "2023-10-20T07:42:11.493899", + "commit_hash_to_benchmark": "9219bfba0e028a557109b8e39c0fd91c1df243f8", + 
"branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/WarlockAgent.json b/arena/WarlockAgent.json new file mode 100644 index 000000000000..55977a9f343e --- /dev/null +++ b/arena/WarlockAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/warlockee/AutoGPT-wl", + "timestamp": "2023-10-27T21:30:11.455084", + "commit_hash_to_benchmark": "6f66376bb8a4116330fe867d9dff83f938f7aa14", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/WeatherInformer.json b/arena/WeatherInformer.json new file mode 100644 index 000000000000..4cc94787f168 --- /dev/null +++ b/arena/WeatherInformer.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/colour-me-bibi/Auto-GPT", + "timestamp": "2023-09-19T14:11:53.195135", + "commit_hash_to_benchmark": "2098e192da0ec8eecf0010ae62704e6727dfa42a", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/WiseAgent.json b/arena/WiseAgent.json new file mode 100644 index 000000000000..02c03c0a2043 --- /dev/null +++ b/arena/WiseAgent.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/Ashish-Soni08/SoniGPT", "timestamp": "2023-10-08T18:39:38Z", "commit_hash_to_benchmark": "b52aba4ef545add8fb6c7f8009615cb38e24db80", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/XXY.json b/arena/XXY.json new file mode 100644 index 000000000000..849438def548 --- /dev/null +++ b/arena/XXY.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/xuxiangyang/AutoGPT", + "timestamp": "2023-10-14T04:40:39.828483", + "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/YOU.json b/arena/YOU.json new file mode 100644 index 000000000000..64629cf403d1 --- /dev/null +++ b/arena/YOU.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/gentaag/AutoGPT", + "timestamp": "2023-10-28T14:03:12.555466", + 
"commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/YoudaoAutoGPT.json b/arena/YoudaoAutoGPT.json new file mode 100644 index 000000000000..8e81970eb093 --- /dev/null +++ b/arena/YoudaoAutoGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/jiezhangGt/AutoGPT", + "timestamp": "2023-10-20T03:02:17.342168", + "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/YoutubePost_agent.json b/arena/YoutubePost_agent.json new file mode 100644 index 000000000000..46b7d81b798f --- /dev/null +++ b/arena/YoutubePost_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ramirez3rg/Auto-GPT", + "timestamp": "2023-09-21T20:35:24.266598", + "commit_hash_to_benchmark": "c72a35e92e4f95aca25221e216c3a49d0dbc739b", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Yui3.json b/arena/Yui3.json new file mode 100644 index 000000000000..439183005801 --- /dev/null +++ b/arena/Yui3.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/YuiChan04233/AutoGPT1", + "timestamp": "2023-10-08T02:03:48.189959", + "commit_hash_to_benchmark": "b2d53d8d18c754a5b877ffeb9f42d3387c3324fd", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Yutan_agent.json b/arena/Yutan_agent.json new file mode 100644 index 000000000000..468f5f37352b --- /dev/null +++ b/arena/Yutan_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/zyt329/AutoGPT", + "timestamp": "2023-09-29T21:47:23.741942", + "commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ZJgpt.json b/arena/ZJgpt.json new file mode 100644 index 000000000000..0ac3d2567454 --- /dev/null +++ b/arena/ZJgpt.json @@ -0,0 +1,6 @@ +{ + 
"github_repo_url": "https://github.com/jiezhangGt/AutoGPT", + "timestamp": "2023-10-20T04:04:28.198603", + "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Zeus.json b/arena/Zeus.json new file mode 100644 index 000000000000..0529b52c4421 --- /dev/null +++ b/arena/Zeus.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/MerlimarCode/ZeusGPT", + "timestamp": "2023-10-08T02:31:50.347357", + "commit_hash_to_benchmark": "0d5c2a98c071336e1bb48716cc25d85df2656ced", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ZhaoJianAutoGPT.json b/arena/ZhaoJianAutoGPT.json new file mode 100644 index 000000000000..b2aa60f7ba43 --- /dev/null +++ b/arena/ZhaoJianAutoGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/zhaojianchn/AutoGPT", + "timestamp": "2023-10-17T09:41:06.331671", + "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ZoeyGPT.json b/arena/ZoeyGPT.json new file mode 100644 index 000000000000..c2be10804ce2 --- /dev/null +++ b/arena/ZoeyGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/carylaw/FatGPT", + "timestamp": "2023-10-25T10:03:47.295810", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Zoidberg.json b/arena/Zoidberg.json new file mode 100644 index 000000000000..a56f26d43e1b --- /dev/null +++ b/arena/Zoidberg.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/KapitanFernand/Zoidberg", + "timestamp": "2023-10-24T09:09:27.540179", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/aWOL.json b/arena/aWOL.json new file mode 100644 index 
000000000000..62dc8026138b --- /dev/null +++ b/arena/aWOL.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/aodrasa/aWOL", + "timestamp": "2023-10-11T01:24:01.516559", + "commit_hash_to_benchmark": "0856f6806177b30989b2be78004e059658efbbb4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/a_reverent_heart.json b/arena/a_reverent_heart.json new file mode 100644 index 000000000000..c0233bc389d3 --- /dev/null +++ b/arena/a_reverent_heart.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/iamlockelightning/IAutoGPT", + "timestamp": "2023-10-08T08:03:31.352877", + "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/accidental-agent.json b/arena/accidental-agent.json new file mode 100644 index 000000000000..853068771b43 --- /dev/null +++ b/arena/accidental-agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/siddharthsarda/accidental-agent", + "timestamp": "2023-09-20T08:07:08.337479", + "commit_hash_to_benchmark": "377d0af228bad019be0a9743c2824c033e039654", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/actor_tester.json b/arena/actor_tester.json new file mode 100644 index 000000000000..ec1f0138e944 --- /dev/null +++ b/arena/actor_tester.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/calmglow/mbtiagent", + "timestamp": "2023-10-25T13:15:04.296302", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/admariner.json b/arena/admariner.json new file mode 100644 index 000000000000..2811c5d5c011 --- /dev/null +++ b/arena/admariner.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/admariner/AutoGPT", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", 
"branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/ag1.json b/arena/ag1.json new file mode 100644 index 000000000000..0dcfe64d43c8 --- /dev/null +++ b/arena/ag1.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/gensy421/AutoGensy", + "timestamp": "2023-10-26T06:31:27.588150", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/agent2.json b/arena/agent2.json new file mode 100644 index 000000000000..54b1247ca944 --- /dev/null +++ b/arena/agent2.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/lukadumancic/AutoGPT", + "timestamp": "2023-10-28T16:08:43.603669", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/agentSmith.json b/arena/agentSmith.json new file mode 100644 index 000000000000..805e720e8fb3 --- /dev/null +++ b/arena/agentSmith.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Nameless8243/AgentSmith", + "timestamp": "2023-10-28T20:05:53.168061", + "commit_hash_to_benchmark": "21b809794a90cf6f9a6aa41f179f420045becadc", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/agent_2.json b/arena/agent_2.json new file mode 100644 index 000000000000..1e169e0eecea --- /dev/null +++ b/arena/agent_2.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Exstor/AutoGPT", + "timestamp": "2023-10-31T20:56:49.313875", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/agentgpt.json b/arena/agentgpt.json new file mode 100644 index 000000000000..15aed81c4a37 --- /dev/null +++ b/arena/agentgpt.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/UdaySagar-Git/AutoGPT.git", + "timestamp": "2023-10-24T05:24:58.972720", + 
"commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/agsCehAgent.json b/arena/agsCehAgent.json new file mode 100644 index 000000000000..e628e79a3b99 --- /dev/null +++ b/arena/agsCehAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/albags/AutoGPT.git", + "timestamp": "2023-10-19T11:30:12.759675", + "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ai_assistant.json b/arena/ai_assistant.json new file mode 100644 index 000000000000..2a0d85dee973 --- /dev/null +++ b/arena/ai_assistant.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/xhjxhj001/AutoGPT", + "timestamp": "2023-10-23T12:05:13.923218", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/aiaudit.json b/arena/aiaudit.json new file mode 100644 index 000000000000..e1ecbb1dd719 --- /dev/null +++ b/arena/aiaudit.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/bigsml/AutoGPT.git", + "timestamp": "2023-10-12T07:05:18.886183", + "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/aiwowo.json b/arena/aiwowo.json new file mode 100644 index 000000000000..3412ba3cd364 --- /dev/null +++ b/arena/aiwowo.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/jeffxtang/AutoGPT", + "timestamp": "2023-10-09T05:25:37.720553", + "commit_hash_to_benchmark": "027054ae02657c37be0d28502bb5a22823eae9d9", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/aixiaoxin.json b/arena/aixiaoxin.json new file mode 100644 index 000000000000..a6fe001c5738 --- /dev/null +++ b/arena/aixiaoxin.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": 
"https://github.com/aixiaoxin123/AutoGPT", + "timestamp": "2023-10-27T05:44:49.265845", + "commit_hash_to_benchmark": "6c9152a95c8994898c47c85ea90ba58e0cc02c28", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/akela.json b/arena/akela.json new file mode 100644 index 000000000000..9c811d288316 --- /dev/null +++ b/arena/akela.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/SarahGrevy/AutoGPT", + "timestamp": "2023-10-20T18:56:31.210825", + "commit_hash_to_benchmark": "32300906c9aafea8c550fa2f9edcc113fbfc512c", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/analystgpt.json b/arena/analystgpt.json new file mode 100644 index 000000000000..9227c97a1ed7 --- /dev/null +++ b/arena/analystgpt.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/vleonidas/AutoGPT", + "timestamp": "2023-10-20T16:46:11.806635", + "commit_hash_to_benchmark": "825c3adf62879fa9f91a19c11010336de5c98bfc", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/arbetsformedlingen.json b/arena/arbetsformedlingen.json new file mode 100644 index 000000000000..5afc4316e335 --- /dev/null +++ b/arena/arbetsformedlingen.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/esaiaswestberg/AutoGPT", + "timestamp": "2023-11-02T12:35:40.378520", + "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/assistant1.json b/arena/assistant1.json new file mode 100644 index 000000000000..8bb51d2fea0e --- /dev/null +++ b/arena/assistant1.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/BarneyChambers/AutoGPT", + "timestamp": "2023-10-16T18:35:05.779206", + "commit_hash_to_benchmark": "546e08a5cf2413fcfb857e2c41d21c80c3364218", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/autoai.json b/arena/autoai.json new file mode 100644 
index 000000000000..5197905241d7 --- /dev/null +++ b/arena/autoai.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/maanvithag/AutoGPT", + "timestamp": "2023-10-09T16:19:12.986257", + "commit_hash_to_benchmark": "3bd8ae48433fa46552719de050ded576a3bef4b9", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/autocoder.json b/arena/autocoder.json new file mode 100644 index 000000000000..8d1fd33e6ce9 --- /dev/null +++ b/arena/autocoder.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/mtx-light/AutoGPT", + "timestamp": "2023-10-29T07:33:17.228393", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/autogbd.json b/arena/autogbd.json new file mode 100644 index 000000000000..77f7f4b5ddaa --- /dev/null +++ b/arena/autogbd.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/kylaro/AutoGBD", + "timestamp": "2023-10-09T11:45:26.637129", + "commit_hash_to_benchmark": "f77d383a9f5e66a35d6008bd43cab4d93999cb61", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/autogpt-hackathon2.json b/arena/autogpt-hackathon2.json new file mode 100644 index 000000000000..41960393bd46 --- /dev/null +++ b/arena/autogpt-hackathon2.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/ThisisHubert/AutoGPT-hackathon", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/autogpt.json b/arena/autogpt.json new file mode 100644 index 000000000000..931aa3aa5cf8 --- /dev/null +++ b/arena/autogpt.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Significant-Gravitas/AutoGPT", + "timestamp": "2023-11-15T07:22:09.723393", + "commit_hash_to_benchmark": "fa357dd13928baa4d1e30054bc75edc5d68b08f1", + "branch_to_benchmark": "master" +} \ No 
newline at end of file diff --git a/arena/autogpt_hackathon.json b/arena/autogpt_hackathon.json new file mode 100644 index 000000000000..41960393bd46 --- /dev/null +++ b/arena/autogpt_hackathon.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/ThisisHubert/AutoGPT-hackathon", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/autogpt_hackathon1.json b/arena/autogpt_hackathon1.json new file mode 100644 index 000000000000..41960393bd46 --- /dev/null +++ b/arena/autogpt_hackathon1.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/ThisisHubert/AutoGPT-hackathon", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/autogpt_warlock.json b/arena/autogpt_warlock.json new file mode 100644 index 000000000000..5f6e9c0a52e8 --- /dev/null +++ b/arena/autogpt_warlock.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/warlockee/AutoGPT-wl", + "timestamp": "2023-10-27T00:46:05.266939", + "commit_hash_to_benchmark": "6f66376bb8a4116330fe867d9dff83f938f7aa14", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/autogptagent.json b/arena/autogptagent.json new file mode 100644 index 000000000000..589001597df6 --- /dev/null +++ b/arena/autogptagent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/YasienDwieb/AutoGPT", + "timestamp": "2023-11-04T21:13:17.223261", + "commit_hash_to_benchmark": "0b55de62dc61a33ccf944d80b6d55c730286e07d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/avengaGPT.json b/arena/avengaGPT.json new file mode 100644 index 000000000000..f95163865726 --- /dev/null +++ b/arena/avengaGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/sebabetz/AutoGPT", + 
"timestamp": "2023-10-24T05:25:26.059512", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/babe_perphorator_.json b/arena/babe_perphorator_.json new file mode 100644 index 000000000000..ed3396907e02 --- /dev/null +++ b/arena/babe_perphorator_.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/beavishead/automaton.git", + "timestamp": "2023-10-11T09:43:19.859956", + "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/baby_agent.json b/arena/baby_agent.json new file mode 100644 index 000000000000..ee8f386cc338 --- /dev/null +++ b/arena/baby_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/keli-61/AutoK", + "timestamp": "2023-10-19T07:39:13.300108", + "commit_hash_to_benchmark": "1a30d00194b46f8b923bab191404ce9123e34bdf", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/bait.json b/arena/bait.json new file mode 100644 index 000000000000..9c886bfba9bc --- /dev/null +++ b/arena/bait.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/a0balaton/AutoGPT", + "timestamp": "2023-11-03T07:38:34.616504", + "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/beyond.json b/arena/beyond.json new file mode 100644 index 000000000000..dd51cc2febfa --- /dev/null +++ b/arena/beyond.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/sn0wdown/AutoGPT", + "timestamp": "2023-10-25T07:22:09.723393", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/bigman.json b/arena/bigman.json new file mode 100644 index 000000000000..00d4395820f3 --- /dev/null +++ b/arena/bigman.json @@ 
-0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/bathrobe/autogpt", + "timestamp": "2023-10-04T18:32:29.402925", + "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/billy.json b/arena/billy.json new file mode 100644 index 000000000000..44253ededb99 --- /dev/null +++ b/arena/billy.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/khelil/AutoGPT", + "timestamp": "2023-10-14T17:51:54.044334", + "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/bingoTesting.json b/arena/bingoTesting.json new file mode 100644 index 000000000000..a8fd1e210e0c --- /dev/null +++ b/arena/bingoTesting.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/bingotyty/AutoGPT", + "timestamp": "2023-11-06T04:16:38.612948", + "commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/bosaeed_agent.json b/arena/bosaeed_agent.json new file mode 100644 index 000000000000..e2a1dcc97c97 --- /dev/null +++ b/arena/bosaeed_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/bosaeed/AutoGPT.git", + "timestamp": "2023-10-03T15:31:04.721867", + "commit_hash_to_benchmark": "3da29eae45683457131ee8736bedae7e2a74fbba", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/bot.json b/arena/bot.json new file mode 100644 index 000000000000..3552e7447346 --- /dev/null +++ b/arena/bot.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Sampson2016/AutoGPT", + "timestamp": "2023-09-26T07:44:15.563183", + "commit_hash_to_benchmark": "3d4307a848880c8509e8356bbb9146f0e6f917f4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/bot01.json b/arena/bot01.json new file mode 100644 index 
000000000000..eca05f793a85 --- /dev/null +++ b/arena/bot01.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Arthur-Heng/AutoGPT", + "timestamp": "2023-10-12T04:16:30.658280", + "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/buddy.json b/arena/buddy.json new file mode 100644 index 000000000000..3b2653f9d065 --- /dev/null +++ b/arena/buddy.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/danhamilt/AutoGPT", + "timestamp": "2023-10-09T01:07:11.246485", + "commit_hash_to_benchmark": "b52aba4ef545add8fb6c7f8009615cb38e24db80", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/burt.json b/arena/burt.json new file mode 100644 index 000000000000..7f9acb5ef2c9 --- /dev/null +++ b/arena/burt.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/D4t4DrU1d/burt", + "timestamp": "2023-10-05T14:00:59.740170", + "commit_hash_to_benchmark": "a55ed27679f608003372feb9eb61f0104ca87858", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/business.json b/arena/business.json new file mode 100644 index 000000000000..c086daeaad61 --- /dev/null +++ b/arena/business.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/danielfebrero/AutoGPT", + "timestamp": "2023-10-21T16:12:05.424875", + "commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/byl.json b/arena/byl.json new file mode 100644 index 000000000000..c57a574d51b8 --- /dev/null +++ b/arena/byl.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/yoonh12/byl", + "timestamp": "2023-10-01T08:36:20.309716", + "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/career-agent.json 
b/arena/career-agent.json new file mode 100644 index 000000000000..ba2877abffc6 --- /dev/null +++ b/arena/career-agent.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/asifdotpy/CareerGPT", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/caud.json b/arena/caud.json new file mode 100644 index 000000000000..63dcaeef4241 --- /dev/null +++ b/arena/caud.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Swiftyos/CAUD", + "timestamp": "2023-10-07T15:44:40.526955", + "commit_hash_to_benchmark": "7a33af387e6959506eb8f01b49d296defe587e6d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ccace.json b/arena/ccace.json new file mode 100644 index 000000000000..ae1628cd8383 --- /dev/null +++ b/arena/ccace.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ccsnow127/AutoGPT", + "timestamp": "2023-10-23T08:28:38.119283", + "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/chappigpt.json b/arena/chappigpt.json new file mode 100644 index 000000000000..a136db128551 --- /dev/null +++ b/arena/chappigpt.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Wiradjuri/chappi.git", + "timestamp": "2023-10-08T06:20:43.527806", + "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/chappyAi.json b/arena/chappyAi.json new file mode 100644 index 000000000000..3da98b8c727e --- /dev/null +++ b/arena/chappyAi.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Wiradjuri/chappi.git", + "timestamp": "2023-10-08T06:50:59.175273", + "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", + "branch_to_benchmark": "master" +} \ No newline at end 
of file diff --git a/arena/chatgpt_taller.json b/arena/chatgpt_taller.json new file mode 100644 index 000000000000..996c78970f46 --- /dev/null +++ b/arena/chatgpt_taller.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/leobusar/AutoGPT", + "timestamp": "2023-10-10T04:06:42.480712", + "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/chengshu.json b/arena/chengshu.json new file mode 100644 index 000000000000..e4cffdb81d2f --- /dev/null +++ b/arena/chengshu.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/skadai/AutoGPT", + "timestamp": "2023-10-26T06:54:04.511066", + "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/chenzo.json b/arena/chenzo.json new file mode 100644 index 000000000000..9717e91a74f2 --- /dev/null +++ b/arena/chenzo.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/chenzino/AutoGPT", + "timestamp": "2023-10-05T00:25:37.141373", + "commit_hash_to_benchmark": "7f89b8aae8748bc88b29ca94c3604ba540bbef94", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/cislerk2.json b/arena/cislerk2.json new file mode 100644 index 000000000000..3d4c9dd1009f --- /dev/null +++ b/arena/cislerk2.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/cislerk/AutoGPT", + "timestamp": "2023-10-10T21:05:38.064647", + "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/codebutler.json b/arena/codebutler.json new file mode 100644 index 000000000000..c42fae82705f --- /dev/null +++ b/arena/codebutler.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/AJV009/AutoGPT", "timestamp": "2023-10-26T05:03:09Z", "commit_hash_to_benchmark": 
"03a95a5333db52ac5b129306e47423b638d649b0", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/coder_first.json b/arena/coder_first.json new file mode 100644 index 000000000000..5e8048a328b0 --- /dev/null +++ b/arena/coder_first.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/mtx-light/AutoGPT", + "timestamp": "2023-10-29T07:22:26.774555", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/contentstrategy.json b/arena/contentstrategy.json new file mode 100644 index 000000000000..d1b9dd5aff77 --- /dev/null +++ b/arena/contentstrategy.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/banderson12/AutoGPT", "timestamp": "2023-10-21T04:13:13Z", "commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/cssupdater.json b/arena/cssupdater.json new file mode 100644 index 000000000000..91959adcbe8d --- /dev/null +++ b/arena/cssupdater.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/mplummeridge/AutoGPT", + "timestamp": "2023-10-24T01:25:47.059251", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/da-agent.json b/arena/da-agent.json new file mode 100644 index 000000000000..78bce3e7e029 --- /dev/null +++ b/arena/da-agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/rayzh-lab/AutoGPT", + "timestamp": "2023-10-12T13:37:26.964846", + "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/date-buffer.json b/arena/date-buffer.json new file mode 100644 index 000000000000..ea91442b8099 --- /dev/null +++ b/arena/date-buffer.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": 
"https://github.com/jackbullen/AutoGPT", + "timestamp": "2023-10-14T03:55:27.817045", + "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/davidtest1.json b/arena/davidtest1.json new file mode 100644 index 000000000000..fbaa9445129e --- /dev/null +++ b/arena/davidtest1.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ningzero/AutoGPTTest", + "timestamp": "2023-11-01T10:08:15.790059", + "commit_hash_to_benchmark": "bc61ea35b5a52cc948657aac0ed8fc3f3191ec04", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/davidtestagent.json b/arena/davidtestagent.json new file mode 100644 index 000000000000..0fd27d2b58f3 --- /dev/null +++ b/arena/davidtestagent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ningzero/AutoGPTTest", + "timestamp": "2023-11-01T09:29:35.474709", + "commit_hash_to_benchmark": "bc61ea35b5a52cc948657aac0ed8fc3f3191ec04", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/dda.json b/arena/dda.json new file mode 100644 index 000000000000..3f628dd87ae3 --- /dev/null +++ b/arena/dda.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/aiherrera1/AutoGPT", + "timestamp": "2023-10-15T18:03:04.765167", + "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/decision-maker.json b/arena/decision-maker.json new file mode 100644 index 000000000000..623522fe247b --- /dev/null +++ b/arena/decision-maker.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/vishnub1626/AutoGPT", + "timestamp": "2023-09-28T11:33:39.045838", + "commit_hash_to_benchmark": "4f15b1c5825b3f044c901995e3399d4eacf7ec66", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/dev_agent.json b/arena/dev_agent.json new file mode 100644 index 
000000000000..25aec8ac7d7d --- /dev/null +++ b/arena/dev_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/pedrovvitor/AutoGPT", + "timestamp": "2023-10-15T14:25:07.534330", + "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/devagent.json b/arena/devagent.json new file mode 100644 index 000000000000..f65809e14687 --- /dev/null +++ b/arena/devagent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/w6m6/kkgpt", + "timestamp": "2023-10-20T08:29:25.708364", + "commit_hash_to_benchmark": "052802ff8d9354f23620eb8b6a5fd68cda7e5c0e", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/dive2code.json b/arena/dive2code.json new file mode 100644 index 000000000000..2280c1bef980 --- /dev/null +++ b/arena/dive2code.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/qwdqwqdwqd/autogpt", "timestamp": "2023-10-25T17:55:18Z", "commit_hash_to_benchmark": "c8d239ef6492d7fe30c099909e01a2eede678b70", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/dndagent.json b/arena/dndagent.json new file mode 100644 index 000000000000..9617293dbe72 --- /dev/null +++ b/arena/dndagent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/xSudoNymx/AutoGPT", + "timestamp": "2023-10-13T04:48:12.424344", + "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/dy_agent.json b/arena/dy_agent.json new file mode 100644 index 000000000000..fd5c981b1322 --- /dev/null +++ b/arena/dy_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/dyabel/AutoGPT", + "timestamp": "2023-09-24T07:25:55.818276", + "commit_hash_to_benchmark": "a09d2a581f7b435ea55aa32a5fc7bbb093f4d021", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/dy_agent2.json 
b/arena/dy_agent2.json new file mode 100644 index 000000000000..c6ae45ee69bb --- /dev/null +++ b/arena/dy_agent2.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/dyabel/AutoGPT", + "timestamp": "2023-09-24T09:30:13.885689", + "commit_hash_to_benchmark": "a09d2a581f7b435ea55aa32a5fc7bbb093f4d021", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/easn.json b/arena/easn.json new file mode 100644 index 000000000000..c7ba6bcad731 --- /dev/null +++ b/arena/easn.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/qazwsxdshb/AutoGPT", + "timestamp": "2023-10-21T08:00:39.287093", + "commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/eddy.json b/arena/eddy.json new file mode 100644 index 000000000000..12e625b4c049 --- /dev/null +++ b/arena/eddy.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ltxctdbnn/AutoGPT", + "timestamp": "2023-10-17T08:42:59.396592", + "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ekc911_agent.json b/arena/ekc911_agent.json new file mode 100644 index 000000000000..f755e78eadbc --- /dev/null +++ b/arena/ekc911_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/seacrest/ekc911GPT.git", + "timestamp": "2023-10-05T03:09:36.845932", + "commit_hash_to_benchmark": "73ef89e03a719ec1b2f01b0f04e9b1f64ffb2a7d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/engineer.json b/arena/engineer.json new file mode 100644 index 000000000000..ef0a2f12eafe --- /dev/null +++ b/arena/engineer.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/bary12/AutoGPT", + "timestamp": "2023-10-18T07:21:47.127207", + "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", + "branch_to_benchmark": "master" +} \ No 
newline at end of file diff --git a/arena/evlyn.json b/arena/evlyn.json new file mode 100644 index 000000000000..115c41e113a2 --- /dev/null +++ b/arena/evlyn.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/TimothyZhang/AutoGPT", + "timestamp": "2023-09-26T04:13:50.107902", + "commit_hash_to_benchmark": "e8aae7731919ee37444fd0871d05bff38f03ab66", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/evo-ninja.json b/arena/evo-ninja.json new file mode 100644 index 000000000000..e7ec02cb9e01 --- /dev/null +++ b/arena/evo-ninja.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/polywrap/evo.ninja", + "timestamp": "2023-10-26T09:05:21.013962", + "commit_hash_to_benchmark": "8832a1008607ab8a27de81fbea69bc73c3febb6f", + "branch_to_benchmark": "dev" +} \ No newline at end of file diff --git a/arena/evo.json b/arena/evo.json new file mode 100644 index 000000000000..48de830feaf5 --- /dev/null +++ b/arena/evo.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/abdllahdev/evo", + "timestamp": "2023-09-24T04:36:48.363989", + "commit_hash_to_benchmark": "075529ddc9cbca45ff98f0701baed9b89a712c23", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/faran.json b/arena/faran.json new file mode 100644 index 000000000000..d67d39544caa --- /dev/null +++ b/arena/faran.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/faranbutt/AutoGPT", + "timestamp": "2023-10-03T11:37:15.047378", + "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/first-agent.json b/arena/first-agent.json new file mode 100644 index 000000000000..34eb08d44108 --- /dev/null +++ b/arena/first-agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/DG1202/AutoGPT.git", + "timestamp": "2023-10-22T15:08:00.869208", + "commit_hash_to_benchmark": 
"16e266c65fb4620a1b1397532c503fa426ec191d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/foobar.json b/arena/foobar.json new file mode 100644 index 000000000000..e502066763c3 --- /dev/null +++ b/arena/foobar.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/sosthoff/AutoGPT", + "timestamp": "2023-10-07T17:23:59.763991", + "commit_hash_to_benchmark": "a00d880a3fd62373f53a0b0a45c9dcfdb45968e4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/frankgarcia.json b/arena/frankgarcia.json new file mode 100644 index 000000000000..b02dd557dd99 --- /dev/null +++ b/arena/frankgarcia.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/variavista/AutoGPT", + "timestamp": "2023-09-28T07:03:33.140557", + "commit_hash_to_benchmark": "a555e936c48bca8c794c7116d62a91628e59ac14", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/fritzgpt.json b/arena/fritzgpt.json new file mode 100644 index 000000000000..e9bae34ffb8f --- /dev/null +++ b/arena/fritzgpt.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/bsenst/FritzGPT", "timestamp": "2023-10-07T11:54:36Z", "commit_hash_to_benchmark": "bb960ffb9fadc45fe4fb5277053caa831f196578", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/fst.json b/arena/fst.json new file mode 100644 index 000000000000..97216c4dd2ff --- /dev/null +++ b/arena/fst.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/onewesong/AutoGPT", + "timestamp": "2023-10-10T07:04:45.268630", + "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/fuzz_gen.json b/arena/fuzz_gen.json new file mode 100644 index 000000000000..87273ae48207 --- /dev/null +++ b/arena/fuzz_gen.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/stplaydog/AutoGPT", + "timestamp": 
"2023-09-29T16:15:33.360163", + "commit_hash_to_benchmark": "76c321d6b1a3c6ed938c90149a2954b7dade761a", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/gaby_agent.json b/arena/gaby_agent.json new file mode 100644 index 000000000000..7b57d6dae83c --- /dev/null +++ b/arena/gaby_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://ggonza156: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@github.com/ggonza156/AutoGPT", + "timestamp": "2023-10-21T23:52:39.199690", + "commit_hash_to_benchmark": "eda21d51921899756bf866cf5c4d0f2dcd3e2e23", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/gen_fuzz.json b/arena/gen_fuzz.json new file mode 100644 index 000000000000..c6486156ccaa --- /dev/null +++ b/arena/gen_fuzz.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/stplaydog/AutoGPT", + "timestamp": "2023-09-29T17:45:56.921760", + "commit_hash_to_benchmark": "76c321d6b1a3c6ed938c90149a2954b7dade761a", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ghostcoder.json b/arena/ghostcoder.json new file mode 100644 index 000000000000..738061238484 --- /dev/null +++ b/arena/ghostcoder.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/aorwall/AutoGPT", "timestamp": "2023-10-26T07:02:18Z", "commit_hash_to_benchmark": "580b4467851b879ef6ce369128e8c7a0399f8877", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/gipity.json b/arena/gipity.json new file mode 100644 index 000000000000..84d2d893e19f --- /dev/null +++ b/arena/gipity.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/koad/gpt", + "timestamp": "2023-10-02T19:47:45.668048", + "commit_hash_to_benchmark": "163ab75379e1ee7792f50d4d70a1f482ca9cb6a1", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/gpt-dev-engineer-agent.json b/arena/gpt-dev-engineer-agent.json new file mode 100644 index 000000000000..080c9ab046b6 --- /dev/null 
+++ b/arena/gpt-dev-engineer-agent.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/ATheorell/AutoGPTArenaHack", "timestamp": "2023-10-26T09:33:03Z", "commit_hash_to_benchmark": "1e4f2dc004b92b9f236543674f94fb9f0af9bb2e", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/gpt-eng-forge.json b/arena/gpt-eng-forge.json new file mode 100644 index 000000000000..348120b3a452 --- /dev/null +++ b/arena/gpt-eng-forge.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/pbharrin/AutoGPT", + "timestamp": "2023-09-26T17:55:18.530567", + "commit_hash_to_benchmark": "a09d2a581f7b435ea55aa32a5fc7bbb093f4d021", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/gpt-engineer.json b/arena/gpt-engineer.json new file mode 100644 index 000000000000..080c9ab046b6 --- /dev/null +++ b/arena/gpt-engineer.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/ATheorell/AutoGPTArenaHack", "timestamp": "2023-10-26T09:33:03Z", "commit_hash_to_benchmark": "1e4f2dc004b92b9f236543674f94fb9f0af9bb2e", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/gpt_for_beans.json b/arena/gpt_for_beans.json new file mode 100644 index 000000000000..5f9e89282d83 --- /dev/null +++ b/arena/gpt_for_beans.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/xiazaiba7/AutoGPT.git", + "timestamp": "2023-11-02T06:07:34.435957", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/hall_oto.json b/arena/hall_oto.json new file mode 100644 index 000000000000..09928183c37f --- /dev/null +++ b/arena/hall_oto.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/helloworld4774/AutoGPT.git", + "timestamp": "2023-10-01T17:47:00.644268", + "commit_hash_to_benchmark": "26cf7c2e3f7b8f61ecda9e301f7a4b36f2b14f2f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff 
--git a/arena/han.json b/arena/han.json new file mode 100644 index 000000000000..8cf8cb54c963 --- /dev/null +++ b/arena/han.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Hanhan0831/AutoGPT", + "timestamp": "2023-10-14T01:01:58.300995", + "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/happy_guy.json b/arena/happy_guy.json new file mode 100644 index 000000000000..d1df91da3f71 --- /dev/null +++ b/arena/happy_guy.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/jianglonghui/AutoGPT", + "timestamp": "2023-11-03T08:54:39.949387", + "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/hello.json b/arena/hello.json new file mode 100644 index 000000000000..44d8836c8f67 --- /dev/null +++ b/arena/hello.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ldnvnbl/AutoGPT", + "timestamp": "2023-10-20T09:37:16.860422", + "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/hodri.json b/arena/hodri.json new file mode 100644 index 000000000000..32e489bfc565 --- /dev/null +++ b/arena/hodri.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/bilisim1995/AutoGPT", + "timestamp": "2023-10-27T10:51:20.447157", + "commit_hash_to_benchmark": "f4985395a94da84b79252bd4d88e040472e1bf6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/houxe.json b/arena/houxe.json new file mode 100644 index 000000000000..ab5a7072cc2e --- /dev/null +++ b/arena/houxe.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/jiarung/AutoGPTTest", + "timestamp": "2023-10-30T08:30:59.320850", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} 
\ No newline at end of file diff --git a/arena/icode.json b/arena/icode.json new file mode 100644 index 000000000000..d71f8df81452 --- /dev/null +++ b/arena/icode.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/krishnaji/AutoGPT", + "timestamp": "2023-10-13T01:09:31.395541", + "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/iku2.json b/arena/iku2.json new file mode 100644 index 000000000000..63b33adfbc18 --- /dev/null +++ b/arena/iku2.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/IkuOhama/AutoGPT", + "timestamp": "2023-09-27T22:46:33.754238", + "commit_hash_to_benchmark": "793ff1c163bb0f9bd3e0c788b4978b8dc193ba6a", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/illynet.json b/arena/illynet.json new file mode 100644 index 000000000000..269222fc6900 --- /dev/null +++ b/arena/illynet.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/illyx1/AutoGPT.git", + "timestamp": "2023-10-26T06:51:32.589776", + "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/illynetV2.json b/arena/illynetV2.json new file mode 100644 index 000000000000..005672b39def --- /dev/null +++ b/arena/illynetV2.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/illyx1/AutoGPT.git", + "timestamp": "2023-10-26T13:14:45.725000", + "commit_hash_to_benchmark": "19175badeefc1325f3fa1a7797ddcfb913c23076", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/illyx1.json b/arena/illyx1.json new file mode 100644 index 000000000000..9cedd5c60b71 --- /dev/null +++ b/arena/illyx1.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/illyx1/AutoGPT.git", + "timestamp": "2023-10-26T12:36:26.810636", + "commit_hash_to_benchmark": 
"19175badeefc1325f3fa1a7797ddcfb913c23076", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/info-retrieval.json b/arena/info-retrieval.json new file mode 100644 index 000000000000..1aa51aac7043 --- /dev/null +++ b/arena/info-retrieval.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/paperMoose/AutoGPT", + "timestamp": "2023-10-07T21:38:11.070180", + "commit_hash_to_benchmark": "a00d880a3fd62373f53a0b0a45c9dcfdb45968e4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ivangpt_agent.json b/arena/ivangpt_agent.json new file mode 100644 index 000000000000..edf940b2236e --- /dev/null +++ b/arena/ivangpt_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ivanliu1989/Auto-GPT", + "timestamp": "2023-10-29T11:24:30.873532", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/jarvis2.json b/arena/jarvis2.json new file mode 100644 index 000000000000..c628f8f54a8c --- /dev/null +++ b/arena/jarvis2.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/MissingDLL/AutoGPT", + "timestamp": "2023-10-08T15:23:46.256775", + "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/jarvis3.json b/arena/jarvis3.json new file mode 100644 index 000000000000..c54000f16456 --- /dev/null +++ b/arena/jarvis3.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/MissingDLL/AutoGPT", + "timestamp": "2023-10-08T15:58:33.790030", + "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/jaxbob1.json b/arena/jaxbob1.json new file mode 100644 index 000000000000..db115ceb2be3 --- /dev/null +++ b/arena/jaxbob1.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": 
"https://github.com/redthing1/AutoGPT", + "timestamp": "2023-10-05T20:02:22.372414", + "commit_hash_to_benchmark": "3b7d83a1a6d3fef1d415bfd1d4ba32ca1ba797cc", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/job_apply.json b/arena/job_apply.json new file mode 100644 index 000000000000..afbeed4e911c --- /dev/null +++ b/arena/job_apply.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/tkbeili/AutoGPT", + "timestamp": "2023-10-01T04:49:20.239338", + "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/jonesyboi.json b/arena/jonesyboi.json new file mode 100644 index 000000000000..93b617c172ae --- /dev/null +++ b/arena/jonesyboi.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/neilrjones/AutoGPT", + "timestamp": "2023-10-18T02:39:02.039894", + "commit_hash_to_benchmark": "d173dd772dfbcce1b75148271857092bc8c22b5c", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/justwondering.json b/arena/justwondering.json new file mode 100644 index 000000000000..0d27545a785c --- /dev/null +++ b/arena/justwondering.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/tbxy09/JustWondering", "timestamp": "2023-10-26T09:48:15Z", "commit_hash_to_benchmark": "b52fea9ba7510adb8c1e7e5cfb83f5fa181d73cf", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/kingmitch.json b/arena/kingmitch.json new file mode 100644 index 000000000000..304ea0521581 --- /dev/null +++ b/arena/kingmitch.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/mitch11223/AutoGPT.git", + "timestamp": "2023-10-20T17:15:31.044252", + "commit_hash_to_benchmark": "825c3adf62879fa9f91a19c11010336de5c98bfc", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/lawk.json b/arena/lawk.json new file mode 100644 index 000000000000..09d5cab74629 --- 
/dev/null +++ b/arena/lawk.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/xiaolongtuan-yuan/AutoGPT", + "timestamp": "2023-10-26T06:18:01.049166", + "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/lcdegpt.json b/arena/lcdegpt.json new file mode 100644 index 000000000000..637e1e1fa8cd --- /dev/null +++ b/arena/lcdegpt.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/tablc/lcdegpt", + "timestamp": "2023-10-17T07:00:24.125505", + "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/letst.json b/arena/letst.json new file mode 100644 index 000000000000..0a0d582afa1a --- /dev/null +++ b/arena/letst.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/jianhuanggo/AutoTestTest", + "timestamp": "2023-10-16T19:07:43.009481", + "commit_hash_to_benchmark": "546e08a5cf2413fcfb857e2c41d21c80c3364218", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/letstest.json b/arena/letstest.json new file mode 100644 index 000000000000..5862da1a7907 --- /dev/null +++ b/arena/letstest.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/jianhuanggo/AutoTestTest", + "timestamp": "2023-10-16T18:38:28.787259", + "commit_hash_to_benchmark": "546e08a5cf2413fcfb857e2c41d21c80c3364218", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/lilAgent.json b/arena/lilAgent.json new file mode 100644 index 000000000000..cbd9f2fb0e96 --- /dev/null +++ b/arena/lilAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/mvuthegoat/AutoGPT.git", + "timestamp": "2023-10-29T17:17:08.476300", + "commit_hash_to_benchmark": "9a30e0f9a43fe05005e36f0bad8531e3a92fd9e6", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/linggong.json 
b/arena/linggong.json new file mode 100644 index 000000000000..c89fd2fe4c50 --- /dev/null +++ b/arena/linggong.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/linggong2023/AutoGPT", + "timestamp": "2023-10-24T12:40:35.679665", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/liuzh.json b/arena/liuzh.json new file mode 100644 index 000000000000..5b95e218d6b0 --- /dev/null +++ b/arena/liuzh.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Hanzhang-lang/AutoGPT_zh", + "timestamp": "2023-10-24T10:25:02.790189", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ltzAgent.json b/arena/ltzAgent.json new file mode 100644 index 000000000000..59635f03c100 --- /dev/null +++ b/arena/ltzAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ltzmaxwell/AutoGPT", + "timestamp": "2023-10-25T08:58:41.646491", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/martingpt.json b/arena/martingpt.json new file mode 100644 index 000000000000..849f42003589 --- /dev/null +++ b/arena/martingpt.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/martinpeng/AutoGPT", + "timestamp": "2023-10-18T05:30:19.072793", + "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/medical-agent.json b/arena/medical-agent.json new file mode 100644 index 000000000000..47e0a6a08d16 --- /dev/null +++ b/arena/medical-agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/daviddhc20120601/AutoGPT", + "timestamp": "2023-11-02T02:08:34.264727", + "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", + 
"branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/metware.json b/arena/metware.json new file mode 100644 index 000000000000..8f433581c401 --- /dev/null +++ b/arena/metware.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/chenxuya/AutoGPT", + "timestamp": "2023-10-23T02:23:48.775561", + "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/miniAgent.json b/arena/miniAgent.json new file mode 100644 index 000000000000..ad71b21b92d0 --- /dev/null +++ b/arena/miniAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/bigzz/AutoGPT", + "timestamp": "2023-10-23T02:41:41.828607", + "commit_hash_to_benchmark": "1a30d00194b46f8b923bab191404ce9123e34bdf", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/minister_agent.json b/arena/minister_agent.json new file mode 100644 index 000000000000..b66f0b76a608 --- /dev/null +++ b/arena/minister_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/miandai/AutoGPT", + "timestamp": "2023-10-25T11:58:34.781500", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/misslu.json b/arena/misslu.json new file mode 100644 index 000000000000..21dc02a45e4c --- /dev/null +++ b/arena/misslu.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/JasonZhang95/AutoGPT", + "timestamp": "2023-10-02T11:37:30.488121", + "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/mljar-agent.json b/arena/mljar-agent.json new file mode 100644 index 000000000000..70c2e7b6d8d8 --- /dev/null +++ b/arena/mljar-agent.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/mljar/mljar-agent", "timestamp": "2023-10-25T14:04:51Z", 
"commit_hash_to_benchmark": "2fbc4d6ef48f0201c046b649e7bc74b9d11ae4e5", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/momo.json b/arena/momo.json new file mode 100644 index 000000000000..b2045bccc607 --- /dev/null +++ b/arena/momo.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/UICJohn/AutoGPT", "timestamp": "2023-10-19T09:52:19Z", "commit_hash_to_benchmark": "3aa92c082ac6912b45583b39d59a13cfda665322", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/monthly_summary.json b/arena/monthly_summary.json new file mode 100644 index 000000000000..3f222a61eaaa --- /dev/null +++ b/arena/monthly_summary.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/corpetty/AutoGPT", + "timestamp": "2023-09-26T19:43:56.005780", + "commit_hash_to_benchmark": "cf630e4f2cee04fd935612f95308322cd9eb1df7", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/mrSabelotodo.json b/arena/mrSabelotodo.json new file mode 100644 index 000000000000..4d8a49f6cf0e --- /dev/null +++ b/arena/mrSabelotodo.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/joslangarica/AutoGPT.git", + "timestamp": "2023-10-03T01:11:32.290733", + "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/myGPT.json b/arena/myGPT.json new file mode 100644 index 000000000000..f5592ec06fc9 --- /dev/null +++ b/arena/myGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Tianxu-Jia/AutoGPT.git", + "timestamp": "2023-10-03T10:59:48.149445", + "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/my_AutoGPT.json b/arena/my_AutoGPT.json new file mode 100644 index 000000000000..2b48e64bdb67 --- /dev/null +++ b/arena/my_AutoGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": 
"https://github.com/Tianxu-Jia/AutoGPT.git", + "timestamp": "2023-10-03T08:57:28.681756", + "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/my_fx_agent.json b/arena/my_fx_agent.json new file mode 100644 index 000000000000..314e63482591 --- /dev/null +++ b/arena/my_fx_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Significant-Gravitas/AutoGPT.git", + "timestamp": "2023-10-18T07:09:36.565783", + "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/my_gpt.json b/arena/my_gpt.json new file mode 100644 index 000000000000..2eb7006726ce --- /dev/null +++ b/arena/my_gpt.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/dawnchen123/AutoGPT", + "timestamp": "2023-11-01T02:08:06.032041", + "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/mygent.json b/arena/mygent.json new file mode 100644 index 000000000000..5eda9ff63128 --- /dev/null +++ b/arena/mygent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/prashanthi-instalily/AutoGPT", + "timestamp": "2023-10-24T13:31:28.287257", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/nawalj.json b/arena/nawalj.json new file mode 100644 index 000000000000..0506380f1732 --- /dev/null +++ b/arena/nawalj.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/nawaljunaid/AutoGPT.git", + "timestamp": "2023-10-03T18:41:12.930097", + "commit_hash_to_benchmark": "3374fd181852d489e51ee33a25d12a064a0bb55d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/newAgent.json b/arena/newAgent.json new file mode 100644 index 
000000000000..9ace7df0a0e1 --- /dev/null +++ b/arena/newAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/alexsoshnikov/AutoGPT", + "timestamp": "2023-10-10T09:27:10.249840", + "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/northfork.json b/arena/northfork.json new file mode 100644 index 000000000000..0b5076ce738f --- /dev/null +++ b/arena/northfork.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Sewen/AutoGPT", + "timestamp": "2023-09-26T07:18:29.975526", + "commit_hash_to_benchmark": "3d4307a848880c8509e8356bbb9146f0e6f917f4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/od_agent_1.json b/arena/od_agent_1.json new file mode 100644 index 000000000000..068becf683dc --- /dev/null +++ b/arena/od_agent_1.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/mattsinnock/AutoGPT", + "timestamp": "2023-10-05T01:13:15.930770", + "commit_hash_to_benchmark": "73ef89e03a719ec1b2f01b0f04e9b1f64ffb2a7d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/operationAgent.json b/arena/operationAgent.json new file mode 100644 index 000000000000..f4587aaa07cf --- /dev/null +++ b/arena/operationAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/KMing-L/AutoGPT", + "timestamp": "2023-10-09T02:21:56.002832", + "commit_hash_to_benchmark": "2d865cc9e6d0b3c7f10777849adf9492b6400904", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/personal-al-website.json b/arena/personal-al-website.json new file mode 100644 index 000000000000..905ae4ade427 --- /dev/null +++ b/arena/personal-al-website.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Hazzari/AutoGPT", + "timestamp": "2023-10-01T11:59:23.504561", + "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", + 
"branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/piGPT.json b/arena/piGPT.json new file mode 100644 index 000000000000..a01cb6c4a5bc --- /dev/null +++ b/arena/piGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/pihanya/AutoGPT", + "timestamp": "2023-10-06T20:37:37.445255", + "commit_hash_to_benchmark": "abf88fe5097770b1da3383a19208b5a23e2371f3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/pipeline.json b/arena/pipeline.json new file mode 100644 index 000000000000..4ce4eed21dff --- /dev/null +++ b/arena/pipeline.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/geesugar/AutoGPT", + "timestamp": "2023-09-26T04:52:08.379642", + "commit_hash_to_benchmark": "075529ddc9cbca45ff98f0701baed9b89a712c23", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/podcast_agent.json b/arena/podcast_agent.json new file mode 100644 index 000000000000..6b7487bfab3d --- /dev/null +++ b/arena/podcast_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/aliumujib/AutoGPT", + "timestamp": "2023-10-28T06:03:18.488676", + "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/potato.json b/arena/potato.json new file mode 100644 index 000000000000..c78cec9d60c6 --- /dev/null +++ b/arena/potato.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/volkov/AutoGPT", "timestamp": "2023-10-23T05:24:11Z", "commit_hash_to_benchmark": "7d2532c1814d624725e7a1fce8831dc0def27fb8", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/project_assitant.json b/arena/project_assitant.json new file mode 100644 index 000000000000..239a7c92198e --- /dev/null +++ b/arena/project_assitant.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/milog1994/AutoGPT.git", + "timestamp": "2023-10-30T21:08:25.083221", + 
"commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/project_master.json b/arena/project_master.json new file mode 100644 index 000000000000..79e0f5a234cd --- /dev/null +++ b/arena/project_master.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/milog1994/AutoGPT.git", + "timestamp": "2023-10-30T21:14:18.974130", + "commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/project_review.json b/arena/project_review.json new file mode 100644 index 000000000000..e5889d49a1b5 --- /dev/null +++ b/arena/project_review.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/oneforce/AutoGPT", + "timestamp": "2023-10-24T09:51:05.658251", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/prometheus.json b/arena/prometheus.json new file mode 100644 index 000000000000..bcd8f6660358 --- /dev/null +++ b/arena/prometheus.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/yashrahurikar23/prometheus", + "timestamp": "2023-10-04T15:21:16.474459", + "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/proudgpt.json b/arena/proudgpt.json new file mode 100644 index 000000000000..383a4a2f8707 --- /dev/null +++ b/arena/proudgpt.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/OmarHory/Star-Agent", + "timestamp": "2023-10-01T22:11:15.978902", + "commit_hash_to_benchmark": "8252a2fa8fee852a22093bf7fd8755f86c6b0ad5", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/qinghu3.json b/arena/qinghu3.json new file mode 100644 index 000000000000..06b4a4d943de --- /dev/null +++ b/arena/qinghu3.json @@ -0,0 
+1,6 @@ +{ + "github_repo_url": "https://github.com/QingHu1227/AutoGPT.git", + "timestamp": "2023-11-06T04:11:34.227212", + "commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ra.json b/arena/ra.json new file mode 100644 index 000000000000..b29e96cecd28 --- /dev/null +++ b/arena/ra.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/aramfaghfouri/AutoGPT", + "timestamp": "2023-10-23T18:03:39.069151", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/ra1.json b/arena/ra1.json new file mode 100644 index 000000000000..4b50158c6468 --- /dev/null +++ b/arena/ra1.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/aramfaghfouri/AutoGPT", + "timestamp": "2023-10-23T18:12:20.095032", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/rachael.json b/arena/rachael.json new file mode 100644 index 000000000000..fe57a0c5ddfb --- /dev/null +++ b/arena/rachael.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/dotdust/rachael.git", + "timestamp": "2023-10-08T13:18:35.946639", + "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/raindrop.json b/arena/raindrop.json new file mode 100644 index 000000000000..10decc9c878d --- /dev/null +++ b/arena/raindrop.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/tianbinraindrop/AutoGPT", + "timestamp": "2023-10-01T02:24:57.822495", + "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/researchGPT.json b/arena/researchGPT.json new file mode 100644 index 
000000000000..3784933f0b7e --- /dev/null +++ b/arena/researchGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/gty3310/AutoGPT", + "timestamp": "2023-10-09T23:36:29.771968", + "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/researchGPT2.json b/arena/researchGPT2.json new file mode 100644 index 000000000000..eadb82df0a15 --- /dev/null +++ b/arena/researchGPT2.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/gty3310/AutoGPT", + "timestamp": "2023-10-17T15:22:36.628578", + "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/research_analyst.json b/arena/research_analyst.json new file mode 100644 index 000000000000..675df1ad8dcf --- /dev/null +++ b/arena/research_analyst.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/aramfaghfouri/AutoGPT", + "timestamp": "2023-10-23T17:53:54.235178", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/robita.json b/arena/robita.json new file mode 100644 index 000000000000..15f3d44ac75e --- /dev/null +++ b/arena/robita.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/atetsuka/AutoGPT", + "timestamp": "2023-10-02T07:16:13.845473", + "commit_hash_to_benchmark": "7ec92d8c063fc041eefd9522450e4ef52e5a34da", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/robot.json b/arena/robot.json new file mode 100644 index 000000000000..3f1eded5cf8e --- /dev/null +++ b/arena/robot.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/brisklad/AutoGPT", + "timestamp": "2023-10-15T13:49:47.384228", + "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", + "branch_to_benchmark": "master" +} \ No newline at end of 
file diff --git a/arena/searchagent.json b/arena/searchagent.json new file mode 100644 index 000000000000..8136c1345685 --- /dev/null +++ b/arena/searchagent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/sanjeevsaara/AutoGPT", + "timestamp": "2023-10-16T00:01:53.051453", + "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/set.json b/arena/set.json new file mode 100644 index 000000000000..14efa0819c50 --- /dev/null +++ b/arena/set.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/victorrica/AutoGPT", + "timestamp": "2023-10-24T05:12:51.971269", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/sgpt.json b/arena/sgpt.json new file mode 100644 index 000000000000..cf2ab22c0ee6 --- /dev/null +++ b/arena/sgpt.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ya5has/sgpt", + "timestamp": "2023-11-02T05:51:01.446153", + "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/shivi.json b/arena/shivi.json new file mode 100644 index 000000000000..e7ed40a85015 --- /dev/null +++ b/arena/shivi.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/kshivang/DabblerGPT", + "timestamp": "2023-10-07T01:39:16.601657", + "commit_hash_to_benchmark": "b2d53d8d18c754a5b877ffeb9f42d3387c3324fd", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/sky.json b/arena/sky.json new file mode 100644 index 000000000000..49690196df75 --- /dev/null +++ b/arena/sky.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/hmslsky/Auto-GPT", + "timestamp": "2023-10-31T15:48:50.123435", + "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0", + "branch_to_benchmark": "master" +} 
\ No newline at end of file diff --git a/arena/smith.json b/arena/smith.json new file mode 100644 index 000000000000..c3bfd5978fd3 --- /dev/null +++ b/arena/smith.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/kevinboudot/AutoGPT", + "timestamp": "2023-10-11T12:25:09.516293", + "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/songyalei.json b/arena/songyalei.json new file mode 100644 index 000000000000..2c3b7dcc3032 --- /dev/null +++ b/arena/songyalei.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/songyalei/AutoGPT", + "timestamp": "2023-11-16T07:11:39.746384", + "commit_hash_to_benchmark": "fa357dd13928baa4d1e30054bc75edc5d68b08f1", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/sql.json b/arena/sql.json new file mode 100644 index 000000000000..a9b357a8038c --- /dev/null +++ b/arena/sql.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/monsterooo/AutoGPT", + "timestamp": "2023-09-26T06:46:35.721082", + "commit_hash_to_benchmark": "bec207568a93e38bff971525c53612813aa60730", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/stefan.json b/arena/stefan.json new file mode 100644 index 000000000000..96987be6bad8 --- /dev/null +++ b/arena/stefan.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/sutefu23/AutoGPT", + "timestamp": "2023-10-21T01:03:06.362579", + "commit_hash_to_benchmark": "03e56fece5008d119dd5ae97da57eb4db3d14a1d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/stockAgent.json b/arena/stockAgent.json new file mode 100644 index 000000000000..b4a9c5d3d492 --- /dev/null +++ b/arena/stockAgent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/SnowYoung/StockAgent", + "timestamp": "2023-10-19T09:49:44.372589", + "commit_hash_to_benchmark": 
"f62651ff3f1ece5520916bee7ee441e1949855f9", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/swarms.json b/arena/swarms.json new file mode 100644 index 000000000000..7bd572350e01 --- /dev/null +++ b/arena/swarms.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/ZackBradshaw/Auto-Swarms", "timestamp": "2023-10-16T15:03:21Z", "commit_hash_to_benchmark": "96b591c6f0918265e2256cb9c76ca2ff50f3983f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/tdev.json b/arena/tdev.json new file mode 100644 index 000000000000..68518c814bab --- /dev/null +++ b/arena/tdev.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/miguelcas12/tdev.git", + "timestamp": "2023-09-26T17:36:53.829436", + "commit_hash_to_benchmark": "cf630e4f2cee04fd935612f95308322cd9eb1df7", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/teacher.json b/arena/teacher.json new file mode 100644 index 000000000000..0e0291c006c5 --- /dev/null +++ b/arena/teacher.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/miaowacao/AutoGPT1", + "timestamp": "2023-10-16T07:21:48.209351", + "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/test-tpk.json b/arena/test-tpk.json new file mode 100644 index 000000000000..87f4f4e2c42f --- /dev/null +++ b/arena/test-tpk.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/seeseesky/AutoGPT", + "timestamp": "2023-10-27T04:06:10.599340", + "commit_hash_to_benchmark": "21b809794a90cf6f9a6aa41f179f420045becadc", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/test.json b/arena/test.json new file mode 100644 index 000000000000..00b762a09b78 --- /dev/null +++ b/arena/test.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Nivek92/AutoGPT", + "timestamp": "2023-10-01T15:46:07.871808", + 
"commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/test1.json b/arena/test1.json new file mode 100644 index 000000000000..e9f9ff00a30d --- /dev/null +++ b/arena/test1.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/mplummeridge/AutoGPT", + "timestamp": "2023-10-24T01:06:24.100385", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/testGPT.json b/arena/testGPT.json new file mode 100644 index 000000000000..f1078ed65a14 --- /dev/null +++ b/arena/testGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/PZON2/testGPT", + "timestamp": "2023-10-15T12:06:56.373935", + "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/thebestagent.json b/arena/thebestagent.json new file mode 100644 index 000000000000..0003b82b62cf --- /dev/null +++ b/arena/thebestagent.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/hisandan/AutoGPT", "timestamp": "2023-10-09T14:10:20Z", "commit_hash_to_benchmark": "da5109b07d94ae3de1b3399ad2be6171b14cb304", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/theone.json b/arena/theone.json new file mode 100644 index 000000000000..0003b82b62cf --- /dev/null +++ b/arena/theone.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/hisandan/AutoGPT", "timestamp": "2023-10-09T14:10:20Z", "commit_hash_to_benchmark": "da5109b07d94ae3de1b3399ad2be6171b14cb304", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/tiffGPT.json b/arena/tiffGPT.json new file mode 100644 index 000000000000..84833b637f37 --- /dev/null +++ b/arena/tiffGPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/darkcyber-ninja/AutoGPT", + "timestamp": 
"2023-10-31T18:25:58.281391", + "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/trend_agent.json b/arena/trend_agent.json new file mode 100644 index 000000000000..ba7d6839c524 --- /dev/null +++ b/arena/trend_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Asmedeus998/AutoGPT.git", + "timestamp": "2023-10-01T23:04:42.429686", + "commit_hash_to_benchmark": "8252a2fa8fee852a22093bf7fd8755f86c6b0ad5", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/umiuni_agent.json b/arena/umiuni_agent.json new file mode 100644 index 000000000000..0dd76a137ef0 --- /dev/null +++ b/arena/umiuni_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/umiuni-community/AutoGPT.git", + "timestamp": "2023-10-01T11:37:00.284821", + "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/uply.json b/arena/uply.json new file mode 100644 index 000000000000..f3058753ef9b --- /dev/null +++ b/arena/uply.json @@ -0,0 +1 @@ +{"github_repo_url": "https://github.com/uply23333/Uply-GPT", "timestamp": "2023-10-20T00:48:01Z", "commit_hash_to_benchmark": "052802ff8d9354f23620eb8b6a5fd68cda7e5c0e", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/url-to-lead.json b/arena/url-to-lead.json new file mode 100644 index 000000000000..f7564d8636ca --- /dev/null +++ b/arena/url-to-lead.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/nikolajlovenhardt/AutoGPT", + "timestamp": "2023-11-01T15:18:00.402718", + "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/v-gpt.json b/arena/v-gpt.json new file mode 100644 index 000000000000..1537194575d0 --- /dev/null +++ b/arena/v-gpt.json @@ -0,0 +1,6 
@@ +{ + "github_repo_url": "https://github.com/Varun565/AutoGPT", + "timestamp": "2023-10-05T03:17:36.972978", + "commit_hash_to_benchmark": "3374fd181852d489e51ee33a25d12a064a0bb55d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/victor2-0.json b/arena/victor2-0.json new file mode 100644 index 000000000000..b984c1bcca5e --- /dev/null +++ b/arena/victor2-0.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/victorleduc/AutoGPT", + "timestamp": "2023-10-23T23:35:53.044545", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/web_developer.json b/arena/web_developer.json new file mode 100644 index 000000000000..7f1f9c4afb38 --- /dev/null +++ b/arena/web_developer.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/paul726/AutoGPT", + "timestamp": "2023-10-15T13:36:03.387061", + "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/webagent.json b/arena/webagent.json new file mode 100644 index 000000000000..f1bccc9f71c0 --- /dev/null +++ b/arena/webagent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/ddNTP/myagent.git", + "timestamp": "2023-09-20T11:21:05.331950", + "commit_hash_to_benchmark": "377d0af228bad019be0a9743c2824c033e039654", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/webgeek.json b/arena/webgeek.json new file mode 100644 index 000000000000..33789db6b0f0 --- /dev/null +++ b/arena/webgeek.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/webgeeksai/AutoGPT.git", + "timestamp": "2023-10-13T06:22:22.056151", + "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/wedding-planner.json b/arena/wedding-planner.json new file 
mode 100644 index 000000000000..b2acfa68685b --- /dev/null +++ b/arena/wedding-planner.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/mogronalol/AutoGPT", + "timestamp": "2023-10-08T20:31:43.422977", + "commit_hash_to_benchmark": "b52aba4ef545add8fb6c7f8009615cb38e24db80", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/woohoo_agent.json b/arena/woohoo_agent.json new file mode 100644 index 000000000000..a805c34986a7 --- /dev/null +++ b/arena/woohoo_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/FIresInWind/AutoGPT", + "timestamp": "2023-10-19T15:14:59.786203", + "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/wyjagent.json b/arena/wyjagent.json new file mode 100644 index 000000000000..e96772536dc7 --- /dev/null +++ b/arena/wyjagent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/wangyijunlyy/AutoGPT", + "timestamp": "2023-11-03T09:21:36.143887", + "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/xmly.json b/arena/xmly.json new file mode 100644 index 000000000000..23cf046e52e3 --- /dev/null +++ b/arena/xmly.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/dongdaoguang/AutoGPT", + "timestamp": "2023-10-11T06:30:06.866694", + "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/xq_agent.json b/arena/xq_agent.json new file mode 100644 index 000000000000..cccf5586bb04 --- /dev/null +++ b/arena/xq_agent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/emptykid/AutoGPT", + "timestamp": "2023-10-24T10:37:55.170776", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + "branch_to_benchmark": "master" +} \ No 
newline at end of file diff --git a/arena/xt0m-GPT.json b/arena/xt0m-GPT.json new file mode 100644 index 000000000000..130bbae2fc35 --- /dev/null +++ b/arena/xt0m-GPT.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/jcartes/xt0m-GPT", + "timestamp": "2023-10-15T01:31:05.785913", + "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/xtest.json b/arena/xtest.json new file mode 100644 index 000000000000..e189babe38a4 --- /dev/null +++ b/arena/xtest.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/popperxu/AutoGPT", + "timestamp": "2023-10-31T06:25:36.338549", + "commit_hash_to_benchmark": "c3569d1842e6568ab1327e577603e71ad1feb622", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/yarbis.json b/arena/yarbis.json new file mode 100644 index 000000000000..65d6c50f23ae --- /dev/null +++ b/arena/yarbis.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/sintecba/AutoGPT", + "timestamp": "2023-10-10T18:11:07.473738", + "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/zaheer.json b/arena/zaheer.json new file mode 100644 index 000000000000..01e4e72c8781 --- /dev/null +++ b/arena/zaheer.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/zaheerahmad33/AutoGPT", + "timestamp": "2023-10-22T21:48:48.414779", + "commit_hash_to_benchmark": "b4ee485906c1d8da71ce9b3093996383322980fe", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/zcb.json b/arena/zcb.json new file mode 100644 index 000000000000..c1892107073e --- /dev/null +++ b/arena/zcb.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/asasasheshou/AutoGPT", + "timestamp": "2023-10-25T09:15:30.114147", + "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", + 
"branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/zczc.json b/arena/zczc.json new file mode 100644 index 000000000000..b484f0bef80b --- /dev/null +++ b/arena/zczc.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Howard-Cheung/AutoGPT", + "timestamp": "2023-10-26T12:48:30.729105", + "commit_hash_to_benchmark": "ab2a61833584c42ededa805cbac50718c72aa5ae", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/zhizhi.json b/arena/zhizhi.json new file mode 100644 index 000000000000..58d86008e690 --- /dev/null +++ b/arena/zhizhi.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/bolyage/zhizhi", + "timestamp": "2023-10-19T11:38:51.332966", + "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/zlipknot_1.json b/arena/zlipknot_1.json new file mode 100644 index 000000000000..0532417963a3 --- /dev/null +++ b/arena/zlipknot_1.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/zlipknot/AutoGPT.git", + "timestamp": "2023-10-25T19:20:38.529540", + "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/zlipknot_test_agent_4.json b/arena/zlipknot_test_agent_4.json new file mode 100644 index 000000000000..2096d67b560c --- /dev/null +++ b/arena/zlipknot_test_agent_4.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/zlipknot/AutoGPT.git", + "timestamp": "2023-10-25T19:13:02.418676", + "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/zze.json b/arena/zze.json new file mode 100644 index 000000000000..7b69f1872b6a --- /dev/null +++ b/arena/zze.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/quasimodo7614/AutoGPT", + "timestamp": 
"2023-10-16T07:49:29.399457", + "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/autogpt_platform/backend/backend/server/routers/integrations.py b/autogpt_platform/backend/backend/server/routers/integrations.py new file mode 100644 index 000000000000..c4367b3d38ed --- /dev/null +++ b/autogpt_platform/backend/backend/server/routers/integrations.py @@ -0,0 +1,236 @@ +import logging +from typing import Annotated + +from autogpt_libs.supabase_integration_credentials_store import ( + SupabaseIntegrationCredentialsStore, +) +from autogpt_libs.supabase_integration_credentials_store.types import ( + APIKeyCredentials, + Credentials, + CredentialsType, + OAuth2Credentials, +) +from fastapi import ( + APIRouter, + Body, + Depends, + HTTPException, + Path, + Query, + Request, + Response, +) +from pydantic import BaseModel, SecretStr +from supabase import Client + +from backend.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler +from backend.util.settings import Settings + +from ..utils import get_supabase, get_user_id + +logger = logging.getLogger(__name__) +settings = Settings() +router = APIRouter() + + +def get_store(supabase: Client = Depends(get_supabase)): + return SupabaseIntegrationCredentialsStore(supabase) + + +class LoginResponse(BaseModel): + login_url: str + state_token: str + + +@router.get("/{provider}/login") +async def login( + provider: Annotated[str, Path(title="The provider to initiate an OAuth flow for")], + user_id: Annotated[str, Depends(get_user_id)], + request: Request, + store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)], + scopes: Annotated[ + str, Query(title="Comma-separated list of authorization scopes") + ] = "", +) -> LoginResponse: + handler = _get_provider_oauth_handler(request, provider) + + requested_scopes = scopes.split(",") if scopes else [] + + # Generate and store a secure random state token along with 
the scopes + state_token = await store.store_state_token(user_id, provider, requested_scopes) + + login_url = handler.get_login_url(requested_scopes, state_token) + + return LoginResponse(login_url=login_url, state_token=state_token) + + +class CredentialsMetaResponse(BaseModel): + id: str + type: CredentialsType + title: str | None + scopes: list[str] | None + username: str | None + + +@router.post("/{provider}/callback") +async def callback( + provider: Annotated[str, Path(title="The target provider for this OAuth exchange")], + code: Annotated[str, Body(title="Authorization code acquired by user login")], + state_token: Annotated[str, Body(title="Anti-CSRF nonce")], + store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)], + user_id: Annotated[str, Depends(get_user_id)], + request: Request, +) -> CredentialsMetaResponse: + logger.debug(f"Received OAuth callback for provider: {provider}") + handler = _get_provider_oauth_handler(request, provider) + + # Verify the state token + if not await store.verify_state_token(user_id, state_token, provider): + logger.warning(f"Invalid or expired state token for user {user_id}") + raise HTTPException(status_code=400, detail="Invalid or expired state token") + + try: + scopes = await store.get_any_valid_scopes_from_state_token( + user_id, state_token, provider + ) + logger.debug(f"Retrieved scopes from state token: {scopes}") + + scopes = handler.handle_default_scopes(scopes) + + credentials = handler.exchange_code_for_tokens(code, scopes) + logger.debug(f"Received credentials with final scopes: {credentials.scopes}") + + # Check if the granted scopes are sufficient for the requested scopes + if not set(scopes).issubset(set(credentials.scopes)): + # For now, we'll just log the warning and continue + logger.warning( + f"Granted scopes {credentials.scopes} for {provider}do not include all requested scopes {scopes}" + ) + + except Exception as e: + logger.error(f"Code->Token exchange failed for provider 
{provider}: {e}") + raise HTTPException( + status_code=400, detail=f"Failed to exchange code for tokens: {str(e)}" + ) + + # TODO: Allow specifying `title` to set on `credentials` + store.add_creds(user_id, credentials) + + logger.debug( + f"Successfully processed OAuth callback for user {user_id} and provider {provider}" + ) + return CredentialsMetaResponse( + id=credentials.id, + type=credentials.type, + title=credentials.title, + scopes=credentials.scopes, + username=credentials.username, + ) + + +@router.get("/{provider}/credentials") +async def list_credentials( + provider: Annotated[str, Path(title="The provider to list credentials for")], + user_id: Annotated[str, Depends(get_user_id)], + store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)], +) -> list[CredentialsMetaResponse]: + credentials = store.get_creds_by_provider(user_id, provider) + return [ + CredentialsMetaResponse( + id=cred.id, + type=cred.type, + title=cred.title, + scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None, + username=cred.username if isinstance(cred, OAuth2Credentials) else None, + ) + for cred in credentials + ] + + +@router.get("/{provider}/credentials/{cred_id}") +async def get_credential( + provider: Annotated[str, Path(title="The provider to retrieve credentials for")], + cred_id: Annotated[str, Path(title="The ID of the credentials to retrieve")], + user_id: Annotated[str, Depends(get_user_id)], + store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)], +) -> Credentials: + credential = store.get_creds_by_id(user_id, cred_id) + if not credential: + raise HTTPException(status_code=404, detail="Credentials not found") + if credential.provider != provider: + raise HTTPException( + status_code=404, detail="Credentials do not match the specified provider" + ) + return credential + + +@router.post("/{provider}/credentials", status_code=201) +async def create_api_key_credentials( + store: 
Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)], + user_id: Annotated[str, Depends(get_user_id)], + provider: Annotated[str, Path(title="The provider to create credentials for")], + api_key: Annotated[str, Body(title="The API key to store")], + title: Annotated[str, Body(title="Optional title for the credentials")], + expires_at: Annotated[ + int | None, Body(title="Unix timestamp when the key expires") + ] = None, +) -> APIKeyCredentials: + new_credentials = APIKeyCredentials( + provider=provider, + api_key=SecretStr(api_key), + title=title, + expires_at=expires_at, + ) + + try: + store.add_creds(user_id, new_credentials) + except Exception as e: + raise HTTPException( + status_code=500, detail=f"Failed to store credentials: {str(e)}" + ) + return new_credentials + + +@router.delete("/{provider}/credentials/{cred_id}", status_code=204) +async def delete_credential( + provider: Annotated[str, Path(title="The provider to delete credentials for")], + cred_id: Annotated[str, Path(title="The ID of the credentials to delete")], + user_id: Annotated[str, Depends(get_user_id)], + store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)], +): + creds = store.get_creds_by_id(user_id, cred_id) + if not creds: + raise HTTPException(status_code=404, detail="Credentials not found") + if creds.provider != provider: + raise HTTPException( + status_code=404, detail="Credentials do not match the specified provider" + ) + + store.delete_creds_by_id(user_id, cred_id) + return Response(status_code=204) + + +# -------- UTILITIES --------- # + + +def _get_provider_oauth_handler(req: Request, provider_name: str) -> BaseOAuthHandler: + if provider_name not in HANDLERS_BY_NAME: + raise HTTPException( + status_code=404, detail=f"Unknown provider '{provider_name}'" + ) + + client_id = getattr(settings.secrets, f"{provider_name}_client_id") + client_secret = getattr(settings.secrets, f"{provider_name}_client_secret") + if not (client_id and client_secret): + 
raise HTTPException( + status_code=501, + detail=f"Integration with provider '{provider_name}' is not configured", + ) + + handler_class = HANDLERS_BY_NAME[provider_name] + frontend_base_url = settings.config.frontend_base_url or str(req.base_url) + return handler_class( + client_id=client_id, + client_secret=client_secret, + redirect_uri=f"{frontend_base_url}/auth/integrations/oauth_callback", + ) diff --git a/autogpt_platform/frontend/src/components/ui/use-toast.ts b/autogpt_platform/frontend/src/components/ui/use-toast.ts new file mode 100644 index 000000000000..6555e795c1ed --- /dev/null +++ b/autogpt_platform/frontend/src/components/ui/use-toast.ts @@ -0,0 +1,191 @@ +"use client"; + +// Inspired by react-hot-toast library +import * as React from "react"; + +import type { ToastActionElement, ToastProps } from "@/components/ui/toast"; + +const TOAST_LIMIT = 1; +const TOAST_REMOVE_DELAY = 1000000; + +type ToasterToast = ToastProps & { + id: string; + title?: React.ReactNode; + description?: React.ReactNode; + action?: ToastActionElement; +}; + +const actionTypes = { + ADD_TOAST: "ADD_TOAST", + UPDATE_TOAST: "UPDATE_TOAST", + DISMISS_TOAST: "DISMISS_TOAST", + REMOVE_TOAST: "REMOVE_TOAST", +} as const; + +let count = 0; + +function genId() { + count = (count + 1) % Number.MAX_SAFE_INTEGER; + return count.toString(); +} + +type ActionType = typeof actionTypes; + +type Action = + | { + type: ActionType["ADD_TOAST"]; + toast: ToasterToast; + } + | { + type: ActionType["UPDATE_TOAST"]; + toast: Partial; + } + | { + type: ActionType["DISMISS_TOAST"]; + toastId?: ToasterToast["id"]; + } + | { + type: ActionType["REMOVE_TOAST"]; + toastId?: ToasterToast["id"]; + }; + +interface State { + toasts: ToasterToast[]; +} + +const toastTimeouts = new Map>(); + +const addToRemoveQueue = (toastId: string) => { + if (toastTimeouts.has(toastId)) { + return; + } + + const timeout = setTimeout(() => { + toastTimeouts.delete(toastId); + dispatch({ + type: "REMOVE_TOAST", + toastId: 
toastId, + }); + }, TOAST_REMOVE_DELAY); + + toastTimeouts.set(toastId, timeout); +}; + +export const reducer = (state: State, action: Action): State => { + switch (action.type) { + case "ADD_TOAST": + return { + ...state, + toasts: [action.toast, ...state.toasts].slice(0, TOAST_LIMIT), + }; + + case "UPDATE_TOAST": + return { + ...state, + toasts: state.toasts.map((t) => + t.id === action.toast.id ? { ...t, ...action.toast } : t, + ), + }; + + case "DISMISS_TOAST": { + const { toastId } = action; + + // ! Side effects ! - This could be extracted into a dismissToast() action, + // but I'll keep it here for simplicity + if (toastId) { + addToRemoveQueue(toastId); + } else { + state.toasts.forEach((toast) => { + addToRemoveQueue(toast.id); + }); + } + + return { + ...state, + toasts: state.toasts.map((t) => + t.id === toastId || toastId === undefined + ? { + ...t, + open: false, + } + : t, + ), + }; + } + case "REMOVE_TOAST": + if (action.toastId === undefined) { + return { + ...state, + toasts: [], + }; + } + return { + ...state, + toasts: state.toasts.filter((t) => t.id !== action.toastId), + }; + } +}; + +const listeners: Array<(state: State) => void> = []; + +let memoryState: State = { toasts: [] }; + +function dispatch(action: Action) { + memoryState = reducer(memoryState, action); + listeners.forEach((listener) => { + listener(memoryState); + }); +} + +type Toast = Omit; + +function toast({ ...props }: Toast) { + const id = genId(); + + const update = (props: ToasterToast) => + dispatch({ + type: "UPDATE_TOAST", + toast: { ...props, id }, + }); + const dismiss = () => dispatch({ type: "DISMISS_TOAST", toastId: id }); + + dispatch({ + type: "ADD_TOAST", + toast: { + ...props, + id, + open: true, + onOpenChange: (open) => { + if (!open) dismiss(); + }, + }, + }); + + return { + id: id, + dismiss, + update, + }; +} + +function useToast() { + const [state, setState] = React.useState(memoryState); + + React.useEffect(() => { + listeners.push(setState); + return () 
=> { + const index = listeners.indexOf(setState); + if (index > -1) { + listeners.splice(index, 1); + } + }; + }, [state]); + + return { + ...state, + toast, + dismiss: (toastId?: string) => dispatch({ type: "DISMISS_TOAST", toastId }), + }; +} + +export { useToast, toast }; diff --git a/autogpts/autogpt/.coveragerc b/autogpts/autogpt/.coveragerc new file mode 100644 index 000000000000..d4fa8be10b1b --- /dev/null +++ b/autogpts/autogpt/.coveragerc @@ -0,0 +1,2 @@ +[run] +relative_files = true \ No newline at end of file diff --git a/autogpts/autogpt/.devcontainer/Dockerfile b/autogpts/autogpt/.devcontainer/Dockerfile new file mode 100644 index 000000000000..17b1e73f4597 --- /dev/null +++ b/autogpts/autogpt/.devcontainer/Dockerfile @@ -0,0 +1,13 @@ +# Use an official Python base image from the Docker Hub +FROM python:3.10 + +# Install browsers +RUN apt-get update && apt-get install -y \ + chromium-driver firefox-esr \ + ca-certificates + +# Install utilities +RUN apt-get install -y curl jq wget git + +# Declare working directory +WORKDIR /workspace/AutoGPT diff --git a/autogpts/autogpt/.devcontainer/devcontainer.json b/autogpts/autogpt/.devcontainer/devcontainer.json new file mode 100644 index 000000000000..d66c5ffa547f --- /dev/null +++ b/autogpts/autogpt/.devcontainer/devcontainer.json @@ -0,0 +1,56 @@ +{ + "dockerComposeFile": "./docker-compose.yml", + "service": "auto-gpt", + "workspaceFolder": "/workspace/AutoGPT", + "shutdownAction": "stopCompose", + "features": { + "ghcr.io/devcontainers/features/common-utils:2": { + "installZsh": "true", + "username": "vscode", + "userUid": "1000", + "userGid": "1000", + "upgradePackages": "true" + }, + "ghcr.io/devcontainers/features/desktop-lite:1": {}, + "ghcr.io/devcontainers/features/github-cli:1": {}, + "ghcr.io/devcontainers/features/python:1": "none", + "ghcr.io/devcontainers/features/node:1": "none", + "ghcr.io/devcontainers/features/git:1": { + "version": "latest", + "ppa": "false" + } + }, + // Configure 
tool-specific properties. + "customizations": { + // Configure properties specific to VS Code. + "vscode": { + // Set *default* container specific settings.json values on container create. + "settings": { + "python.defaultInterpreterPath": "/usr/local/bin/python", + "python.testing.pytestEnabled": true, + "python.testing.unittestEnabled": false + }, + "extensions": [ + "ms-python.python", + "VisualStudioExptTeam.vscodeintellicode", + "ms-python.vscode-pylance", + "ms-python.black-formatter", + "ms-python.isort", + "GitHub.vscode-pull-request-github", + "GitHub.copilot", + "github.vscode-github-actions" + ] + } + }, + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Use 'postCreateCommand' to run commands after the container is created. + // "postCreateCommand": "poetry install", + + // Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. + "remoteUser": "vscode", + + // Add the freshly containerized repo to the list of safe repositories + "postCreateCommand": "git config --global --add safe.directory /workspace/AutoGPT && poetry install" +} diff --git a/autogpts/autogpt/.devcontainer/docker-compose.yml b/autogpts/autogpt/.devcontainer/docker-compose.yml new file mode 100644 index 000000000000..75871fa0b627 --- /dev/null +++ b/autogpts/autogpt/.devcontainer/docker-compose.yml @@ -0,0 +1,12 @@ +# To boot the app run the following: +# docker-compose run auto-gpt +version: '3.9' + +services: + auto-gpt: + build: + dockerfile: .devcontainer/Dockerfile + context: ../ + tty: true + volumes: + - ../:/workspace/AutoGPT diff --git a/autogpts/autogpt/.dockerignore b/autogpts/autogpt/.dockerignore new file mode 100644 index 000000000000..1ee35738b6f3 --- /dev/null +++ b/autogpts/autogpt/.dockerignore @@ -0,0 +1,14 @@ +.* +**/.venv* +**/__pycache__ +*.template +*.yaml +*.yml +!prompt_settings.yaml + +data/* +logs/* +agbenchmark_config/logs/* 
+agbenchmark_config/reports/* + +*.png diff --git a/autogpts/autogpt/.env.template b/autogpts/autogpt/.env.template new file mode 100644 index 000000000000..b5fb32c0de82 --- /dev/null +++ b/autogpts/autogpt/.env.template @@ -0,0 +1,239 @@ +################################################################################ +### AutoGPT - GENERAL SETTINGS +################################################################################ + +## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key) +OPENAI_API_KEY=your-openai-api-key + +## TELEMETRY_OPT_IN - Share telemetry on errors and other issues with the AutoGPT team, e.g. through Sentry. +## This helps us to spot and solve problems earlier & faster. (Default: DISABLED) +# TELEMETRY_OPT_IN=true + +## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False) +# EXECUTE_LOCAL_COMMANDS=False + +### Workspace ### + +## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./data/agents//workspace (Default: True) +# RESTRICT_TO_WORKSPACE=True + +## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled (Default: None) +# DISABLED_COMMAND_CATEGORIES= + +## FILE_STORAGE_BACKEND - Choose a storage backend for contents +## Options: local, gcs, s3 +# FILE_STORAGE_BACKEND=local + +## STORAGE_BUCKET - GCS/S3 Bucket to store contents in +# STORAGE_BUCKET=autogpt + +## GCS Credentials +# see https://cloud.google.com/storage/docs/authentication#libauth + +## AWS/S3 Credentials +# see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html + +## S3_ENDPOINT_URL - If you're using non-AWS S3, set your endpoint here. 
+# S3_ENDPOINT_URL= + +### Miscellaneous ### + +## USER_AGENT - Define the user-agent used by the requests library to browse website (string) +# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36" + +## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the AutoGPT root directory. (defaults to ai_settings.yaml) +# AI_SETTINGS_FILE=ai_settings.yaml + +## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file, relative to the AutoGPT root directory. (Default plugins_config.yaml) +# PLUGINS_CONFIG_FILE=plugins_config.yaml + +## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the AutoGPT root directory. (defaults to prompt_settings.yaml) +# PROMPT_SETTINGS_FILE=prompt_settings.yaml + +## AUTHORISE COMMAND KEY - Key to authorise commands +# AUTHORISE_COMMAND_KEY=y + +## EXIT_KEY - Key to exit AutoGPT +# EXIT_KEY=n + +################################################################################ +### LLM PROVIDER +################################################################################ + +## TEMPERATURE - Sets temperature in OpenAI (Default: 0) +# TEMPERATURE=0 + +## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url +# the following is an example: +# OPENAI_API_BASE_URL=http://localhost:443/v1 + +# OPENAI_API_TYPE= +# OPENAI_API_VERSION= + +## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling +## Note: this feature is only supported by OpenAI's newer models. 
+# OPENAI_FUNCTIONS=False + +## OPENAI_ORGANIZATION - Your OpenAI Organization key (Default: None) +# OPENAI_ORGANIZATION= + +## USE_AZURE - Use Azure OpenAI or not (Default: False) +# USE_AZURE=False + +## AZURE_CONFIG_FILE - The path to the azure.yaml file, relative to the folder containing this file. (Default: azure.yaml) +# AZURE_CONFIG_FILE=azure.yaml + +# AZURE_OPENAI_AD_TOKEN= +# AZURE_OPENAI_ENDPOINT= + +################################################################################ +### LLM MODELS +################################################################################ + +## SMART_LLM - Smart language model (Default: gpt-4-turbo) +# SMART_LLM=gpt-4-turbo + +## FAST_LLM - Fast language model (Default: gpt-3.5-turbo) +# FAST_LLM=gpt-3.5-turbo + +## EMBEDDING_MODEL - Model to use for creating embeddings +# EMBEDDING_MODEL=text-embedding-3-small + +################################################################################ +### SHELL EXECUTION +################################################################################ + +## SHELL_COMMAND_CONTROL - Whether to use "allowlist" or "denylist" to determine what shell commands can be executed (Default: denylist) +# SHELL_COMMAND_CONTROL=denylist + +## ONLY if SHELL_COMMAND_CONTROL is set to denylist: +## SHELL_DENYLIST - List of shell commands that ARE NOT allowed to be executed by AutoGPT (Default: sudo,su) +# SHELL_DENYLIST=sudo,su + +## ONLY if SHELL_COMMAND_CONTROL is set to allowlist: +## SHELL_ALLOWLIST - List of shell commands that ARE allowed to be executed by AutoGPT (Default: None) +# SHELL_ALLOWLIST= + +################################################################################ +### IMAGE GENERATION PROVIDER +################################################################################ + +### Common + +## IMAGE_PROVIDER - Image provider (Default: dalle) +# IMAGE_PROVIDER=dalle + +## IMAGE_SIZE - Image size (Default: 256) +# IMAGE_SIZE=256 + +### Huggingface 
(IMAGE_PROVIDER=huggingface) + +## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4) +# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4 + +## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None) +# HUGGINGFACE_API_TOKEN= + +### Stable Diffusion (IMAGE_PROVIDER=sdwebui) + +## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None) +# SD_WEBUI_AUTH= + +## SD_WEBUI_URL - Stable Diffusion Web UI API URL (Default: http://localhost:7860) +# SD_WEBUI_URL=http://localhost:7860 + +################################################################################ +### AUDIO TO TEXT PROVIDER +################################################################################ + +## AUDIO_TO_TEXT_PROVIDER - Audio-to-text provider (Default: huggingface) +# AUDIO_TO_TEXT_PROVIDER=huggingface + +## HUGGINGFACE_AUDIO_TO_TEXT_MODEL - The model for HuggingFace to use (Default: CompVis/stable-diffusion-v1-4) +# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=CompVis/stable-diffusion-v1-4 + +################################################################################ +### GITHUB +################################################################################ + +## GITHUB_API_KEY - Github API key / PAT (Default: None) +# GITHUB_API_KEY= + +## GITHUB_USERNAME - Github username (Default: None) +# GITHUB_USERNAME= + +################################################################################ +### WEB BROWSING +################################################################################ + +## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True) +# HEADLESS_BROWSER=True + +## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome) +# USE_WEB_BROWSER=chrome + +## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (Default: 3000) +# BROWSE_CHUNK_MAX_LENGTH=3000 + +## BROWSE_SPACY_LANGUAGE_MODEL - spaCy language 
model](https://spacy.io/usage/models) to use when creating chunks. (Default: en_core_web_sm) +# BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm + +## GOOGLE_API_KEY - Google API key (Default: None) +# GOOGLE_API_KEY= + +## GOOGLE_CUSTOM_SEARCH_ENGINE_ID - Google custom search engine ID (Default: None) +# GOOGLE_CUSTOM_SEARCH_ENGINE_ID= + +################################################################################ +### TEXT TO SPEECH PROVIDER +################################################################################ + +## TEXT_TO_SPEECH_PROVIDER - Which Text to Speech provider to use (Default: gtts) +## Options: gtts, streamelements, elevenlabs, macos +# TEXT_TO_SPEECH_PROVIDER=gtts + +## STREAMELEMENTS_VOICE - Voice to use for StreamElements (Default: Brian) +# STREAMELEMENTS_VOICE=Brian + +## ELEVENLABS_API_KEY - Eleven Labs API key (Default: None) +# ELEVENLABS_API_KEY= + +## ELEVENLABS_VOICE_ID - Eleven Labs voice ID (Example: None) +# ELEVENLABS_VOICE_ID= + +################################################################################ +### CHAT MESSAGES +################################################################################ + +## CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False) +# CHAT_MESSAGES_ENABLED=False + +################################################################################ +### LOGGING +################################################################################ + +## LOG_LEVEL - Set the minimum level to filter log output by. Setting this to DEBUG implies LOG_FORMAT=debug, unless LOG_FORMAT is set explicitly. +## Options: DEBUG, INFO, WARNING, ERROR, CRITICAL +# LOG_LEVEL=INFO + +## LOG_FORMAT - The format in which to log messages to the console (and log files). +## Options: simple, debug, structured_google_cloud +# LOG_FORMAT=simple + +## LOG_FILE_FORMAT - Normally follows the LOG_FORMAT setting, but can be set separately. +## Note: Log file output is disabled if LOG_FORMAT=structured_google_cloud. 
+# LOG_FILE_FORMAT=simple + +## PLAIN_OUTPUT - Disables animated typing and the spinner in the console output. (Default: False) +# PLAIN_OUTPUT=False + + +################################################################################ +### Agent Protocol Server Settings +################################################################################ +## AP_SERVER_PORT - Specifies what port the agent protocol server will listen on. (Default: 8000) +## AP_SERVER_DB_URL - Specifies what connection url the agent protocol database will connect to (Default: Internal SQLite) +## AP_SERVER_CORS_ALLOWED_ORIGINS - Comma separated list of allowed origins for CORS. (Default: http://localhost:{AP_SERVER_PORT}) +# AP_SERVER_PORT=8000 +# AP_SERVER_DB_URL=sqlite:///data/ap_server.db +# AP_SERVER_CORS_ALLOWED_ORIGINS= diff --git a/autogpts/autogpt/.envrc b/autogpts/autogpt/.envrc new file mode 100644 index 000000000000..85777e21f01b --- /dev/null +++ b/autogpts/autogpt/.envrc @@ -0,0 +1,4 @@ +# Upon entering directory, direnv requests user permission once to automatically load project dependencies onwards. +# Eliminating the need of running "nix develop github:superherointj/nix-auto-gpt" for Nix users to develop/use AutoGPT. 
+ +[[ -z $IN_NIX_SHELL ]] && use flake github:superherointj/nix-auto-gpt diff --git a/autogpts/autogpt/.flake8 b/autogpts/autogpt/.flake8 new file mode 100644 index 000000000000..b9aa1bc73057 --- /dev/null +++ b/autogpts/autogpt/.flake8 @@ -0,0 +1,11 @@ +[flake8] +max-line-length = 88 +extend-exclude = + .*_cache/, + .venv, + data/, + logs/, + tests/unit/data/, +extend-ignore = + # No whitespace before ':' conflicts with Black style for slices + E203, diff --git a/autogpts/autogpt/.gitattributes b/autogpts/autogpt/.gitattributes new file mode 100644 index 000000000000..60fb560da4aa --- /dev/null +++ b/autogpts/autogpt/.gitattributes @@ -0,0 +1,5 @@ +# Exclude VCR cassettes from stats +tests/vcr_cassettes/**/**.y*ml linguist-generated + +# Mark documentation as such +docs/**.md linguist-documentation diff --git a/autogpts/autogpt/.gitignore b/autogpts/autogpt/.gitignore new file mode 100644 index 000000000000..aaff758a03cf --- /dev/null +++ b/autogpts/autogpt/.gitignore @@ -0,0 +1,169 @@ +## Original ignores +autogpt/keys.py +autogpt/*.json +*.mpeg +.env +azure.yaml +ai_settings.yaml +last_run_ai_settings.yaml +.vscode +.idea/* +auto-gpt.json +log.txt +log-ingestion.txt +/logs +*.log +*.mp3 +mem.sqlite3 +venvAutoGPT +data/* + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +/plugins/* +plugins_config.yaml +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ +site/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.direnv/ +.env +.venv +env/ +venv*/ +ENV/ +env.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ +llama-* +vicuna-* + +# mac +.DS_Store + +openai/ + +# news +CURRENT_BULLETIN.md + +# Nodejs +package-lock.json +package.json + +# Keep +!.keep diff --git a/autogpts/autogpt/.pre-commit-config.yaml b/autogpts/autogpt/.pre-commit-config.yaml new file mode 100644 index 000000000000..d2a061a4d83d --- /dev/null +++ b/autogpts/autogpt/.pre-commit-config.yaml @@ -0,0 +1,42 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-added-large-files + args: ['--maxkb=500'] + - id: check-byte-order-marker + - id: 
check-case-conflict + - id: check-merge-conflict + - id: check-symlinks + - id: debug-statements + + - repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + language_version: python3.10 + + - repo: https://github.com/psf/black + rev: 23.3.0 + hooks: + - id: black + language_version: python3.10 + + # - repo: https://github.com/pre-commit/mirrors-mypy + # rev: 'v1.3.0' + # hooks: + # - id: mypy + + - repo: local + hooks: + # - id: autoflake + # name: autoflake + # entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests + # language: python + # types: [ python ] + - id: pytest-check + name: pytest-check + entry: bash -c 'cd autogpts/autogpt && poetry run pytest --cov=autogpt tests/unit' + language: system + pass_filenames: false + always_run: true diff --git a/autogpts/autogpt/.sourcery.yaml b/autogpts/autogpt/.sourcery.yaml new file mode 100644 index 000000000000..da171e7598f4 --- /dev/null +++ b/autogpts/autogpt/.sourcery.yaml @@ -0,0 +1,71 @@ +# 🪄 This is your project's Sourcery configuration file. + +# You can use it to get Sourcery working in the way you want, such as +# ignoring specific refactorings, skipping directories in your project, +# or writing custom rules. + +# 📚 For a complete reference to this file, see the documentation at +# https://docs.sourcery.ai/Configuration/Project-Settings/ + +# This file was auto-generated by Sourcery on 2023-02-25 at 21:07. + +version: '1' # The schema version of this config file + +ignore: # A list of paths or files which Sourcery will ignore. +- .git +- venv +- .venv +- build +- dist +- env +- .env +- .tox + +rule_settings: + enable: + - default + - gpsg + disable: [] # A list of rule IDs Sourcery will never suggest. + rule_types: + - refactoring + - suggestion + - comment + python_version: '3.10' # A string specifying the lowest Python version your project supports. 
Sourcery will not suggest refactorings requiring a higher Python version. + +# rules: # A list of custom rules Sourcery will include in its analysis. +# - id: no-print-statements +# description: Do not use print statements in the test directory. +# pattern: print(...) +# language: python +# replacement: +# condition: +# explanation: +# paths: +# include: +# - test +# exclude: +# - conftest.py +# tests: [] +# tags: [] + +# rule_tags: {} # Additional rule tags. + +# metrics: +# quality_threshold: 25.0 + +# github: +# labels: [] +# ignore_labels: +# - sourcery-ignore +# request_review: author +# sourcery_branch: sourcery/{base_branch} + +# clone_detection: +# min_lines: 3 +# min_duplicates: 2 +# identical_clones_only: false + +# proxy: +# url: +# ssl_certs_file: +# no_ssl_verify: false diff --git a/autogpts/autogpt/BULLETIN.md b/autogpts/autogpt/BULLETIN.md new file mode 100644 index 000000000000..0140e71220a6 --- /dev/null +++ b/autogpts/autogpt/BULLETIN.md @@ -0,0 +1,13 @@ +# QUICK LINKS 🔗 +# -------------- +🌎 *Official Website*: https://agpt.co. +📖 *User Guide*: https://docs.agpt.co/autogpt. +👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing. + +# v0.5.0 RELEASE HIGHLIGHTS! 🚀🚀 +# ------------------------------- +Cloud-readiness, a new UI, support for the newest Agent Protocol version, and much more: +*v0.5.0 is our biggest release yet!* + +Take a look at the Release Notes on Github for the full changelog: +https://github.com/Significant-Gravitas/AutoGPT/releases. 
diff --git a/autogpts/autogpt/Dockerfile b/autogpts/autogpt/Dockerfile new file mode 100644 index 000000000000..b7af437a83dc --- /dev/null +++ b/autogpts/autogpt/Dockerfile @@ -0,0 +1,56 @@ +# 'dev' or 'release' container build +ARG BUILD_TYPE=dev + +# Use an official Python base image from the Docker Hub +FROM python:3.10-slim AS autogpt-base + +# Install browsers +RUN apt-get update && apt-get install -y \ + chromium-driver ca-certificates gcc \ + && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Install utilities +RUN apt-get update && apt-get install -y \ + curl jq wget git \ + && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Set environment variables +ENV PIP_NO_CACHE_DIR=yes \ + PYTHONUNBUFFERED=1 \ + PYTHONDONTWRITEBYTECODE=1 \ + POETRY_HOME="/opt/poetry" \ + POETRY_VIRTUALENVS_PATH="/venv" \ + POETRY_VIRTUALENVS_IN_PROJECT=0 \ + POETRY_NO_INTERACTION=1 + +# Install and configure Poetry +RUN curl -sSL https://install.python-poetry.org | python3 - +ENV PATH="$POETRY_HOME/bin:$PATH" +RUN poetry config installer.max-workers 10 + +WORKDIR /app +COPY pyproject.toml poetry.lock ./ + +# Set the entrypoint +ENTRYPOINT ["poetry", "run", "autogpt"] +CMD [] + +# dev build -> include everything +FROM autogpt-base as autogpt-dev +RUN poetry install --no-cache --no-root \ + && rm -rf $(poetry env info --path)/src +ONBUILD COPY . 
./ + +# release build -> include bare minimum +FROM autogpt-base as autogpt-release +RUN poetry install --no-cache --no-root --without dev \ + && rm -rf $(poetry env info --path)/src +ONBUILD COPY autogpt/ ./autogpt +ONBUILD COPY scripts/ ./scripts +ONBUILD COPY plugins/ ./plugins +ONBUILD COPY prompt_settings.yaml ./prompt_settings.yaml +ONBUILD COPY README.md ./README.md +ONBUILD RUN mkdir ./data + +FROM autogpt-${BUILD_TYPE} AS autogpt +RUN poetry install --only-root diff --git a/autogpts/autogpt/README.md b/autogpts/autogpt/README.md new file mode 100644 index 000000000000..86e0aa69fa8e --- /dev/null +++ b/autogpts/autogpt/README.md @@ -0,0 +1,180 @@ +# AutoGPT: An Autonomous GPT-4 Experiment + +[📖 **Documentation**][docs] + |  +[🚀 **Contributing**](../../CONTRIBUTING.md) + +AutoGPT is an experimental open-source application showcasing the capabilities of modern Large Language Models. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, AutoGPT pushes the boundaries of what is possible with AI. + +

Demo April 16th 2023

+ +https://user-images.githubusercontent.com/70048414/232352935-55c6bf7c-3958-406e-8610-0913475a0b05.mp4 + +Demo made by Blake Werlinger + +## 🚀 Features + +- 🔌 Agent Protocol ([docs](https://agentprotocol.ai)) +- 💻 Easy to use UI +- 🌐 Internet access for searches and information gathering +- 🧠 Powered by a mix of GPT-4 and GPT-3.5 Turbo +- 🔗 Access to popular websites and platforms +- 🗃️ File generation and editing capabilities +- 🔌 Extensibility with Plugins + + +## Setting up AutoGPT +1. Get an OpenAI [API Key](https://platform.openai.com/account/api-keys) +2. Copy `.env.template` to `.env` and set `OPENAI_API_KEY` +3. Make sure you have Poetry [installed](https://python-poetry.org/docs/#installation) + +For more ways to run AutoGPT, more detailed instructions, and more configuration options, +see the [setup guide][docs/setup]. + +## Running AutoGPT +The CLI should be self-documenting: +```shell +$ ./autogpt.sh --help +Usage: python -m autogpt [OPTIONS] COMMAND [ARGS]... + +Options: + --help Show this message and exit. + +Commands: + run Sets up and runs an agent, based on the task specified by the... + serve Starts an Agent Protocol compliant AutoGPT server, which creates... +``` +When run without a sub-command, it will default to `run` for legacy reasons. + +
+ +$ ./autogpt.sh run --help + + +The `run` sub-command starts AutoGPT with the legacy CLI interface: + +```shell +$ ./autogpt.sh run --help +Usage: python -m autogpt run [OPTIONS] + + Sets up and runs an agent, based on the task specified by the user, or + resumes an existing agent. + +Options: + -c, --continuous Enable Continuous Mode + -y, --skip-reprompt Skips the re-prompting messages at the + beginning of the script + -C, --ai-settings FILE Specifies which ai_settings.yaml file to + use, relative to the AutoGPT root directory. + Will also automatically skip the re-prompt. + -P, --prompt-settings FILE Specifies which prompt_settings.yaml file to + use. + -l, --continuous-limit INTEGER Defines the number of times to run in + continuous mode + --speak Enable Speak Mode + --debug Enable Debug Mode + --gpt3only Enable GPT3.5 Only Mode + --gpt4only Enable GPT4 Only Mode + -m, --use-memory TEXT Defines which Memory backend to use + -b, --browser-name TEXT Specifies which web-browser to use when + using selenium to scrape the web. + --allow-downloads Dangerous: Allows AutoGPT to download files + natively. + --skip-news Specifies whether to suppress the output of + latest news on startup. + --install-plugin-deps Installs external dependencies for 3rd party + plugins. + --ai-name TEXT AI name override + --ai-role TEXT AI role override + --constraint TEXT Add or override AI constraints to include in + the prompt; may be used multiple times to + pass multiple constraints + --resource TEXT Add or override AI resources to include in + the prompt; may be used multiple times to + pass multiple resources + --best-practice TEXT Add or override AI best practices to include + in the prompt; may be used multiple times to + pass multiple best practices + --override-directives If specified, --constraint, --resource and + --best-practice will override the AI's + directives instead of being appended to them + --help Show this message and exit. +``` +
+ + +
+ +$ ./autogpt.sh serve --help + + +The `serve` sub-command starts AutoGPT wrapped in an Agent Protocol server: + +```shell +$ ./autogpt.sh serve --help +Usage: python -m autogpt serve [OPTIONS] + + Starts an Agent Protocol compliant AutoGPT server, which creates a custom + agent for every task. + +Options: + -P, --prompt-settings FILE Specifies which prompt_settings.yaml file to + use. + --debug Enable Debug Mode + --gpt3only Enable GPT3.5 Only Mode + --gpt4only Enable GPT4 Only Mode + -m, --use-memory TEXT Defines which Memory backend to use + -b, --browser-name TEXT Specifies which web-browser to use when using + selenium to scrape the web. + --allow-downloads Dangerous: Allows AutoGPT to download files + natively. + --install-plugin-deps Installs external dependencies for 3rd party + plugins. + --help Show this message and exit. +``` +
+ +With `serve`, the application exposes an Agent Protocol compliant API and serves a frontend, +by default on `http://localhost:8000`. + +For more comprehensive instructions, see the [user guide][docs/usage]. + +[docs]: https://docs.agpt.co/autogpt +[docs/setup]: https://docs.agpt.co/autogpt/setup +[docs/usage]: https://docs.agpt.co/autogpt/usage +[docs/plugins]: https://docs.agpt.co/autogpt/plugins + +## 📚 Resources +* 📔 AutoGPT [team wiki](https://github.com/Significant-Gravitas/Nexus/wiki) +* 🧮 AutoGPT [project kanban](https://github.com/orgs/Significant-Gravitas/projects/1) +* 🌃 AutoGPT [roadmap](https://github.com/orgs/Significant-Gravitas/projects/2) + +## ⚠️ Limitations + +This experiment aims to showcase the potential of GPT-4 but comes with some limitations: + +1. Not a polished application or product, just an experiment +2. May not perform well in complex, real-world business scenarios. In fact, if it actually does, please share your results! +3. Quite expensive to run, so set and monitor your API key limits with OpenAI! + +## 🛡 Disclaimer + +This project, AutoGPT, is an experimental application and is provided "as-is" without any warranty, express or implied. By using this software, you agree to assume all risks associated with its use, including but not limited to data loss, system failure, or any other issues that may arise. + +The developers and contributors of this project do not accept any responsibility or liability for any losses, damages, or other consequences that may occur as a result of using this software. You are solely responsible for any decisions and actions taken based on the information provided by AutoGPT. + +**Please note that the use of the GPT-4 language model can be expensive due to its token usage.** By utilizing this project, you acknowledge that you are responsible for monitoring and managing your own token usage and the associated costs. 
It is highly recommended to check your OpenAI API usage regularly and set up any necessary limits or alerts to prevent unexpected charges. + +As an autonomous experiment, AutoGPT may generate content or take actions that are not in line with real-world business practices or legal requirements. It is your responsibility to ensure that any actions or decisions made based on the output of this software comply with all applicable laws, regulations, and ethical standards. The developers and contributors of this project shall not be held responsible for any consequences arising from the use of this software. + +By using AutoGPT, you agree to indemnify, defend, and hold harmless the developers, contributors, and any affiliated parties from and against any and all claims, damages, losses, liabilities, costs, and expenses (including reasonable attorneys' fees) arising from your use of this software or your violation of these terms. + +--- + +In Q2 of 2023, AutoGPT became the fastest growing open-source project in history. Now that the dust has settled, we're committed to continued sustainable development and growth of the project. + +

+ + Star History Chart + +

diff --git a/autogpts/autogpt/agbenchmark_config/.gitignore b/autogpts/autogpt/agbenchmark_config/.gitignore new file mode 100644 index 000000000000..78bb5abd2434 --- /dev/null +++ b/autogpts/autogpt/agbenchmark_config/.gitignore @@ -0,0 +1,3 @@ +logs/ +reports/ +temp_folder/ diff --git a/autogpts/autogpt/agbenchmark_config/__init__.py b/autogpts/autogpt/agbenchmark_config/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/agbenchmark_config/analyze_reports.py b/autogpts/autogpt/agbenchmark_config/analyze_reports.py new file mode 100644 index 000000000000..2a8f95443fca --- /dev/null +++ b/autogpts/autogpt/agbenchmark_config/analyze_reports.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python3 + +import json +import logging +import re +import sys +from collections import defaultdict +from pathlib import Path + +from tabulate import tabulate + +info = "-v" in sys.argv +debug = "-vv" in sys.argv +granular = "--granular" in sys.argv + +logging.basicConfig( + level=logging.DEBUG if debug else logging.INFO if info else logging.WARNING +) +logger = logging.getLogger(__name__) + +# Get a list of all JSON files in the directory +report_files = [ + report_file + for dir in (Path(__file__).parent / "reports").iterdir() + if re.match(r"^\d{8}T\d{6}_", dir.name) + and (report_file := dir / "report.json").is_file() +] + +labels = list[str]() +runs_per_label = defaultdict[str, int](lambda: 0) +suite_names = list[str]() +test_names = list[str]() + +# Create a dictionary to store grouped success values by suffix and test +grouped_success_values = defaultdict[str, list[str]](list[str]) + +# Loop through each JSON file to collect suffixes and success values +for report_file in sorted(report_files): + with open(report_file) as f: + logger.info(f"Loading {report_file}...") + + data = json.load(f) + if "tests" in data: + test_tree = data["tests"] + label = data["agent_git_commit_sha"].rsplit("/", 1)[1][:7] # commit hash + else: + # Benchmark run still 
in progress + test_tree = data + label = report_file.parent.name.split("_", 1)[1] + logger.info(f"Run '{label}' seems to be in progress") + + runs_per_label[label] += 1 + + def process_test(test_name: str, test_data: dict): + result_group = grouped_success_values[f"{label}|{test_name}"] + + if "tests" in test_data: + logger.debug(f"{test_name} is a test suite") + + # Test suite + suite_attempted = any( + test["metrics"]["attempted"] for test in test_data["tests"].values() + ) + logger.debug(f"suite_attempted: {suite_attempted}") + if not suite_attempted: + return + + if test_name not in test_names: + test_names.append(test_name) + + if test_data["metrics"]["percentage"] == 0: + result_indicator = "❌" + else: + highest_difficulty = test_data["metrics"]["highest_difficulty"] + result_indicator = { + "interface": "🔌", + "novice": "🌑", + "basic": "🌒", + "intermediate": "🌓", + "advanced": "🌔", + "hard": "🌕", + }[highest_difficulty] + + logger.debug(f"result group: {result_group}") + logger.debug(f"runs_per_label: {runs_per_label[label]}") + if len(result_group) + 1 < runs_per_label[label]: + result_group.extend( + ["❔"] * (runs_per_label[label] - len(result_group) - 1) + ) + result_group.append(result_indicator) + logger.debug(f"result group (after): {result_group}") + + if granular: + for test_name, test in test_data["tests"].items(): + process_test(test_name, test) + return + + test_metrics = test_data["metrics"] + result_indicator = "❔" + + if "attempted" not in test_metrics: + return + elif test_metrics["attempted"]: + if test_name not in test_names: + test_names.append(test_name) + + success_value = test_metrics["success"] + result_indicator = {True: "✅", False: "❌"}[success_value] + + if len(result_group) + 1 < runs_per_label[label]: + result_group.extend( + [" "] * (runs_per_label[label] - len(result_group) - 1) + ) + result_group.append(result_indicator) + + for test_name, suite in test_tree.items(): + try: + process_test(test_name, suite) + except KeyError: + 
print(f"{test_name}.metrics: {suite['metrics']}") + raise + + if label not in labels: + labels.append(label) + +# Create headers +headers = ["Test Name"] + list(labels) + +# Prepare data for tabulation +table_data = list[list[str]]() +for test_name in test_names: + row = [test_name] + for label in labels: + results = grouped_success_values.get(f"{label}|{test_name}", ["❔"]) + if len(results) < runs_per_label[label]: + results.extend(["❔"] * (runs_per_label[label] - len(results))) + if len(results) > 1 and all(r == "❔" for r in results): + results.clear() + row.append(" ".join(results)) + table_data.append(row) + +# Print tabulated data +print(tabulate(table_data, headers=headers, tablefmt="grid")) diff --git a/autogpts/autogpt/agbenchmark_config/benchmarks.py b/autogpts/autogpt/agbenchmark_config/benchmarks.py new file mode 100644 index 000000000000..61b16984f0c9 --- /dev/null +++ b/autogpts/autogpt/agbenchmark_config/benchmarks.py @@ -0,0 +1,85 @@ +import asyncio +import logging +import sys +from pathlib import Path + +from autogpt.agent_manager.agent_manager import AgentManager +from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings +from autogpt.app.main import _configure_openai_provider, run_interaction_loop +from autogpt.commands import COMMAND_CATEGORIES +from autogpt.config import AIProfile, ConfigBuilder +from autogpt.file_storage import FileStorageBackendName, get_storage +from autogpt.logs.config import configure_logging +from autogpt.models.command_registry import CommandRegistry + +LOG_DIR = Path(__file__).parent / "logs" + + +def run_specific_agent(task: str, continuous_mode: bool = False) -> None: + agent = bootstrap_agent(task, continuous_mode) + asyncio.run(run_interaction_loop(agent)) + + +def bootstrap_agent(task: str, continuous_mode: bool) -> Agent: + configure_logging( + level=logging.DEBUG, + log_dir=LOG_DIR, + plain_console_output=True, + ) + + config = ConfigBuilder.build_config_from_env() + config.continuous_mode = 
continuous_mode + config.continuous_limit = 20 + config.noninteractive_mode = True + config.memory_backend = "no_memory" + + command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config) + + ai_profile = AIProfile( + ai_name="AutoGPT", + ai_role="a multi-purpose AI assistant.", + ai_goals=[task], + ) + + agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True) + agent_prompt_config.use_functions_api = config.openai_functions + agent_settings = AgentSettings( + name=Agent.default_settings.name, + agent_id=AgentManager.generate_id("AutoGPT-benchmark"), + description=Agent.default_settings.description, + ai_profile=ai_profile, + config=AgentConfiguration( + fast_llm=config.fast_llm, + smart_llm=config.smart_llm, + allow_fs_access=not config.restrict_to_workspace, + use_functions_api=config.openai_functions, + plugins=config.plugins, + ), + prompt_config=agent_prompt_config, + history=Agent.default_settings.history.copy(deep=True), + ) + + local = config.file_storage_backend == FileStorageBackendName.LOCAL + restrict_to_root = not local or config.restrict_to_workspace + file_storage = get_storage( + config.file_storage_backend, root_path="data", restrict_to_root=restrict_to_root + ) + file_storage.initialize() + + agent = Agent( + settings=agent_settings, + llm_provider=_configure_openai_provider(config), + command_registry=command_registry, + file_storage=file_storage, + legacy_config=config, + ) + return agent + + +if __name__ == "__main__": + # The first argument is the script name itself, second is the task + if len(sys.argv) != 2: + print("Usage: python script.py ") + sys.exit(1) + task = sys.argv[1] + run_specific_agent(task, continuous_mode=True) diff --git a/autogpts/autogpt/agbenchmark_config/config.json b/autogpts/autogpt/agbenchmark_config/config.json new file mode 100644 index 000000000000..154fe388fddf --- /dev/null +++ b/autogpts/autogpt/agbenchmark_config/config.json @@ -0,0 +1,8 @@ +{ + "workspace": { + "input": 
"agbenchmark_config/workspace", + "output": "agbenchmark_config/workspace" + }, + "entry_path": "agbenchmark.benchmarks", + "host": "http://localhost:8000" +} diff --git a/autogpts/autogpt/autogpt.bat b/autogpts/autogpt/autogpt.bat new file mode 100644 index 000000000000..12b89f2123b9 --- /dev/null +++ b/autogpts/autogpt/autogpt.bat @@ -0,0 +1,27 @@ +@echo off +setlocal enabledelayedexpansion + +:FindPythonCommand +for %%A in (python3 python) do ( + where /Q %%A + if !errorlevel! EQU 0 ( + set "PYTHON_CMD=%%A" + goto :Found + ) +) + +echo Python not found. Please install Python. +pause +exit /B 1 + +:Found +%PYTHON_CMD% scripts/check_requirements.py +if errorlevel 1 ( + echo + poetry install --without dev + echo + echo Finished installing packages! Starting AutoGPT... + echo +) +poetry run autogpt %* +pause diff --git a/autogpts/autogpt/autogpt.sh b/autogpts/autogpt/autogpt.sh new file mode 100644 index 000000000000..57e1f4192d22 --- /dev/null +++ b/autogpts/autogpt/autogpt.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +function find_python_command() { + if command -v python3 &> /dev/null + then + echo "python3" + elif command -v python &> /dev/null + then + echo "python" + else + echo "Python not found. Please install Python." + exit 1 + fi +} + +PYTHON_CMD=$(find_python_command) + +if $PYTHON_CMD -c "import sys; sys.exit(sys.version_info < (3, 10))"; then + if ! $PYTHON_CMD scripts/check_requirements.py; then + echo + poetry install --without dev + echo + echo "Finished installing packages! Starting AutoGPT..." + echo + fi + poetry run autogpt "$@" +else + echo "Python 3.10 or higher is required to run Auto GPT." 
+fi diff --git a/autogpts/autogpt/autogpt/__init__.py b/autogpts/autogpt/autogpt/__init__.py new file mode 100644 index 000000000000..251826fcd566 --- /dev/null +++ b/autogpts/autogpt/autogpt/__init__.py @@ -0,0 +1,7 @@ +import os +import random +import sys + +if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"): + print("Setting random seed to 42") + random.seed(42) diff --git a/autogpts/autogpt/autogpt/__main__.py b/autogpts/autogpt/autogpt/__main__.py new file mode 100644 index 000000000000..e5b9245666bd --- /dev/null +++ b/autogpts/autogpt/autogpt/__main__.py @@ -0,0 +1,5 @@ +"""AutoGPT: A GPT powered AI Assistant""" +import autogpt.app.cli + +if __name__ == "__main__": + autogpt.app.cli.cli() diff --git a/autogpts/autogpt/autogpt/agent_factory/configurators.py b/autogpts/autogpt/autogpt/agent_factory/configurators.py new file mode 100644 index 000000000000..c938ba506fd9 --- /dev/null +++ b/autogpts/autogpt/autogpt/agent_factory/configurators.py @@ -0,0 +1,124 @@ +from typing import Optional + +from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings +from autogpt.commands import COMMAND_CATEGORIES +from autogpt.config import AIDirectives, AIProfile, Config +from autogpt.core.resource.model_providers import ChatModelProvider +from autogpt.file_storage.base import FileStorage +from autogpt.logs.config import configure_chat_plugins +from autogpt.models.command_registry import CommandRegistry +from autogpt.plugins import scan_plugins + + +def create_agent( + agent_id: str, + task: str, + ai_profile: AIProfile, + app_config: Config, + file_storage: FileStorage, + llm_provider: ChatModelProvider, + directives: Optional[AIDirectives] = None, +) -> Agent: + if not task: + raise ValueError("No task specified for new agent") + if not directives: + directives = AIDirectives.from_file(app_config.prompt_settings_file) + + agent = _configure_agent( + agent_id=agent_id, + task=task, + ai_profile=ai_profile, + directives=directives, + 
app_config=app_config, + file_storage=file_storage, + llm_provider=llm_provider, + ) + + return agent + + +def configure_agent_with_state( + state: AgentSettings, + app_config: Config, + file_storage: FileStorage, + llm_provider: ChatModelProvider, +) -> Agent: + return _configure_agent( + state=state, + app_config=app_config, + file_storage=file_storage, + llm_provider=llm_provider, + ) + + +def _configure_agent( + app_config: Config, + llm_provider: ChatModelProvider, + file_storage: FileStorage, + agent_id: str = "", + task: str = "", + ai_profile: Optional[AIProfile] = None, + directives: Optional[AIDirectives] = None, + state: Optional[AgentSettings] = None, +) -> Agent: + if not (state or agent_id and task and ai_profile and directives): + raise TypeError( + "Either (state) or (agent_id, task, ai_profile, directives)" + " must be specified" + ) + + app_config.plugins = scan_plugins(app_config) + configure_chat_plugins(app_config) + + # Create a CommandRegistry instance and scan default folder + command_registry = CommandRegistry.with_command_modules( + modules=COMMAND_CATEGORIES, + config=app_config, + ) + + agent_state = state or create_agent_state( + agent_id=agent_id, + task=task, + ai_profile=ai_profile, + directives=directives, + app_config=app_config, + ) + + # TODO: configure memory + + return Agent( + settings=agent_state, + llm_provider=llm_provider, + command_registry=command_registry, + file_storage=file_storage, + legacy_config=app_config, + ) + + +def create_agent_state( + agent_id: str, + task: str, + ai_profile: AIProfile, + directives: AIDirectives, + app_config: Config, +) -> AgentSettings: + agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True) + agent_prompt_config.use_functions_api = app_config.openai_functions + + return AgentSettings( + agent_id=agent_id, + name=Agent.default_settings.name, + description=Agent.default_settings.description, + task=task, + ai_profile=ai_profile, + directives=directives, + 
config=AgentConfiguration( + fast_llm=app_config.fast_llm, + smart_llm=app_config.smart_llm, + allow_fs_access=not app_config.restrict_to_workspace, + use_functions_api=app_config.openai_functions, + plugins=app_config.plugins, + ), + prompt_config=agent_prompt_config, + history=Agent.default_settings.history.copy(deep=True), + ) diff --git a/autogpts/autogpt/autogpt/agent_factory/generators.py b/autogpts/autogpt/autogpt/agent_factory/generators.py new file mode 100644 index 000000000000..9f9c44600bac --- /dev/null +++ b/autogpts/autogpt/autogpt/agent_factory/generators.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from autogpt.config.ai_directives import AIDirectives +from autogpt.file_storage.base import FileStorage + +from .configurators import _configure_agent +from .profile_generator import generate_agent_profile_for_task + +if TYPE_CHECKING: + from autogpt.agents.agent import Agent + from autogpt.config import Config + from autogpt.core.resource.model_providers.schema import ChatModelProvider + + +async def generate_agent_for_task( + agent_id: str, + task: str, + app_config: Config, + file_storage: FileStorage, + llm_provider: ChatModelProvider, +) -> Agent: + base_directives = AIDirectives.from_file(app_config.prompt_settings_file) + ai_profile, task_directives = await generate_agent_profile_for_task( + task=task, + app_config=app_config, + llm_provider=llm_provider, + ) + return _configure_agent( + agent_id=agent_id, + task=task, + ai_profile=ai_profile, + directives=base_directives + task_directives, + app_config=app_config, + file_storage=file_storage, + llm_provider=llm_provider, + ) diff --git a/autogpts/autogpt/autogpt/agent_factory/profile_generator.py b/autogpts/autogpt/autogpt/agent_factory/profile_generator.py new file mode 100644 index 000000000000..78afbe51a0f7 --- /dev/null +++ b/autogpts/autogpt/autogpt/agent_factory/profile_generator.py @@ -0,0 +1,248 @@ +import json +import logging + +from 
autogpt.config import AIDirectives, AIProfile, Config +from autogpt.core.configuration import SystemConfiguration, UserConfigurable +from autogpt.core.prompting import ( + ChatPrompt, + LanguageModelClassification, + PromptStrategy, +) +from autogpt.core.resource.model_providers.schema import ( + AssistantChatMessage, + ChatMessage, + ChatModelProvider, + CompletionModelFunction, +) +from autogpt.core.utils.json_schema import JSONSchema + +logger = logging.getLogger(__name__) + + +class AgentProfileGeneratorConfiguration(SystemConfiguration): + model_classification: LanguageModelClassification = UserConfigurable( + default=LanguageModelClassification.SMART_MODEL + ) + _example_call: object = [ + { + "type": "function", + "function": { + "name": "create_agent", + "arguments": { + "name": "CMOGPT", + "description": ( + "a professional digital marketer AI that assists Solopreneurs " + "in growing their businesses by providing " + "world-class expertise in solving marketing problems " + "for SaaS, content products, agencies, and more." + ), + "directives": { + "best_practices": [ + ( + "Engage in effective problem-solving, prioritization, " + "planning, and supporting execution to address your " + "marketing needs as your virtual " + "Chief Marketing Officer." + ), + ( + "Provide specific, actionable, and concise advice to " + "help you make informed decisions without the use of " + "platitudes or overly wordy explanations." + ), + ( + "Identify and prioritize quick wins and cost-effective " + "campaigns that maximize results with minimal time and " + "budget investment." + ), + ( + "Proactively take the lead in guiding you and offering " + "suggestions when faced with unclear information or " + "uncertainty to ensure your marketing strategy remains " + "on track." 
+ ), + ], + "constraints": [ + "Do not suggest illegal or unethical plans or strategies.", + "Take reasonable budgetary limits into account.", + ], + }, + }, + }, + } + ] + system_prompt: str = UserConfigurable( + default=( + "Your job is to respond to a user-defined task, given in triple quotes, by " + "invoking the `create_agent` function to generate an autonomous agent to " + "complete the task. " + "You should supply a role-based name for the agent (_GPT), " + "an informative description for what the agent does, and 1 to 5 directives " + "in each of the categories Best Practices and Constraints, " + "that are optimally aligned with the successful completion " + "of its assigned task.\n" + "\n" + "Example Input:\n" + '"""Help me with marketing my business"""\n\n' + "Example Call:\n" + "```\n" + f"{json.dumps(_example_call, indent=4)}" + "\n```" + ) + ) + user_prompt_template: str = UserConfigurable(default='"""{user_objective}"""') + create_agent_function: dict = UserConfigurable( + default=CompletionModelFunction( + name="create_agent", + description="Create a new autonomous AI agent to complete a given task.", + parameters={ + "name": JSONSchema( + type=JSONSchema.Type.STRING, + description="A short role-based name for an autonomous agent.", + required=True, + ), + "description": JSONSchema( + type=JSONSchema.Type.STRING, + description=( + "An informative one sentence description " + "of what the AI agent does" + ), + required=True, + ), + "directives": JSONSchema( + type=JSONSchema.Type.OBJECT, + properties={ + "best_practices": JSONSchema( + type=JSONSchema.Type.ARRAY, + minItems=1, + maxItems=5, + items=JSONSchema( + type=JSONSchema.Type.STRING, + ), + description=( + "One to five highly effective best practices " + "that are optimally aligned with the completion " + "of the given task" + ), + required=True, + ), + "constraints": JSONSchema( + type=JSONSchema.Type.ARRAY, + minItems=1, + maxItems=5, + items=JSONSchema( + type=JSONSchema.Type.STRING, + ), + 
description=( + "One to five reasonable and efficacious constraints " + "that are optimally aligned with the completion " + "of the given task" + ), + required=True, + ), + }, + required=True, + ), + }, + ).schema + ) + + +class AgentProfileGenerator(PromptStrategy): + default_configuration: AgentProfileGeneratorConfiguration = ( + AgentProfileGeneratorConfiguration() + ) + + def __init__( + self, + model_classification: LanguageModelClassification, + system_prompt: str, + user_prompt_template: str, + create_agent_function: dict, + ): + self._model_classification = model_classification + self._system_prompt_message = system_prompt + self._user_prompt_template = user_prompt_template + self._create_agent_function = CompletionModelFunction.parse( + create_agent_function + ) + + @property + def model_classification(self) -> LanguageModelClassification: + return self._model_classification + + def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt: + system_message = ChatMessage.system(self._system_prompt_message) + user_message = ChatMessage.user( + self._user_prompt_template.format( + user_objective=user_objective, + ) + ) + prompt = ChatPrompt( + messages=[system_message, user_message], + functions=[self._create_agent_function], + ) + return prompt + + def parse_response_content( + self, + response_content: AssistantChatMessage, + ) -> tuple[AIProfile, AIDirectives]: + """Parse the actual text response from the objective model. + + Args: + response_content: The raw response content from the objective model. + + Returns: + The parsed response. 
+ + """ + try: + if not response_content.tool_calls: + raise ValueError( + f"LLM did not call {self._create_agent_function.name} function; " + "agent profile creation failed" + ) + arguments: object = response_content.tool_calls[0].function.arguments + ai_profile = AIProfile( + ai_name=arguments.get("name"), + ai_role=arguments.get("description"), + ) + ai_directives = AIDirectives( + best_practices=arguments.get("directives", {}).get("best_practices"), + constraints=arguments.get("directives", {}).get("constraints"), + resources=[], + ) + except KeyError: + logger.debug(f"Failed to parse this response content: {response_content}") + raise + return ai_profile, ai_directives + + +async def generate_agent_profile_for_task( + task: str, + app_config: Config, + llm_provider: ChatModelProvider, +) -> tuple[AIProfile, AIDirectives]: + """Generates an AIConfig object from the given string. + + Returns: + AIConfig: The AIConfig object tailored to the user's input + """ + agent_profile_generator = AgentProfileGenerator( + **AgentProfileGenerator.default_configuration.dict() # HACK + ) + + prompt = agent_profile_generator.build_prompt(task) + + # Call LLM with the string as user input + output = await llm_provider.create_chat_completion( + prompt.messages, + model_name=app_config.smart_llm, + functions=prompt.functions, + completion_parser=agent_profile_generator.parse_response_content, + ) + + # Debug LLM Output + logger.debug(f"AI Config Generator Raw Output: {output.response}") + + return output.parsed_result diff --git a/autogpts/autogpt/autogpt/agent_manager/__init__.py b/autogpts/autogpt/autogpt/agent_manager/__init__.py new file mode 100644 index 000000000000..a412566bf351 --- /dev/null +++ b/autogpts/autogpt/autogpt/agent_manager/__init__.py @@ -0,0 +1,3 @@ +from .agent_manager import AgentManager + +__all__ = ["AgentManager"] diff --git a/autogpts/autogpt/autogpt/agent_manager/agent_manager.py b/autogpts/autogpt/autogpt/agent_manager/agent_manager.py new file mode 
100644 index 000000000000..fd3becf840c0 --- /dev/null +++ b/autogpts/autogpt/autogpt/agent_manager/agent_manager.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +import uuid +from pathlib import Path + +from autogpt.agents.agent import AgentSettings +from autogpt.file_storage.base import FileStorage + + +class AgentManager: + def __init__(self, file_storage: FileStorage): + self.file_manager = file_storage.clone_with_subroot("agents") + + @staticmethod + def generate_id(agent_name: str) -> str: + """Generate a unique ID for an agent given agent name.""" + unique_id = str(uuid.uuid4())[:8] + return f"{agent_name}-{unique_id}" + + def list_agents(self) -> list[str]: + """Return all agent directories within storage.""" + agent_dirs: list[str] = [] + for dir in self.file_manager.list_folders(): + if self.file_manager.exists(dir / "state.json"): + agent_dirs.append(dir.name) + return agent_dirs + + def get_agent_dir(self, agent_id: str) -> Path: + """Return the directory of the agent with the given ID.""" + assert len(agent_id) > 0 + agent_dir: Path | None = None + if self.file_manager.exists(agent_id): + agent_dir = self.file_manager.root / agent_id + else: + raise FileNotFoundError(f"No agent with ID '{agent_id}'") + return agent_dir + + def load_agent_state(self, agent_id: str) -> AgentSettings: + """Load the state of the agent with the given ID.""" + state_file_path = Path(agent_id) / "state.json" + if not self.file_manager.exists(state_file_path): + raise FileNotFoundError(f"Agent with ID '{agent_id}' has no state.json") + + state = self.file_manager.read_file(state_file_path) + return AgentSettings.parse_raw(state) diff --git a/autogpts/autogpt/autogpt/agents/__init__.py b/autogpts/autogpt/autogpt/agents/__init__.py new file mode 100644 index 000000000000..94a5f42a5874 --- /dev/null +++ b/autogpts/autogpt/autogpt/agents/__init__.py @@ -0,0 +1,4 @@ +from .agent import Agent +from .base import AgentThoughts, BaseAgent, CommandArgs, CommandName + +__all__ = 
["BaseAgent", "Agent", "CommandName", "CommandArgs", "AgentThoughts"] diff --git a/autogpts/autogpt/autogpt/agents/agent.py b/autogpts/autogpt/autogpt/agents/agent.py new file mode 100644 index 000000000000..744607682ae9 --- /dev/null +++ b/autogpts/autogpt/autogpt/agents/agent.py @@ -0,0 +1,320 @@ +from __future__ import annotations + +import inspect +import logging +import time +from datetime import datetime +from typing import TYPE_CHECKING, Optional + +import sentry_sdk +from pydantic import Field + +from autogpt.core.configuration import Configurable +from autogpt.core.prompting import ChatPrompt +from autogpt.core.resource.model_providers import ( + AssistantChatMessage, + ChatMessage, + ChatModelProvider, +) +from autogpt.file_storage.base import FileStorage +from autogpt.logs.log_cycle import ( + CURRENT_CONTEXT_FILE_NAME, + NEXT_ACTION_FILE_NAME, + USER_INPUT_FILE_NAME, + LogCycleHandler, +) +from autogpt.logs.utils import fmt_kwargs +from autogpt.models.action_history import ( + Action, + ActionErrorResult, + ActionInterruptedByHuman, + ActionResult, + ActionSuccessResult, +) +from autogpt.models.command import CommandOutput +from autogpt.models.context_item import ContextItem + +from .base import BaseAgent, BaseAgentConfiguration, BaseAgentSettings +from .features.agent_file_manager import AgentFileManagerMixin +from .features.context import ContextMixin +from .features.watchdog import WatchdogMixin +from .prompt_strategies.one_shot import ( + OneShotAgentPromptConfiguration, + OneShotAgentPromptStrategy, +) +from .utils.exceptions import ( + AgentException, + AgentTerminated, + CommandExecutionError, + DuplicateOperationError, + UnknownCommandError, +) + +if TYPE_CHECKING: + from autogpt.config import Config + from autogpt.models.command_registry import CommandRegistry + +logger = logging.getLogger(__name__) + + +class AgentConfiguration(BaseAgentConfiguration): + pass + + +class AgentSettings(BaseAgentSettings): + config: AgentConfiguration = 
Field(default_factory=AgentConfiguration) + prompt_config: OneShotAgentPromptConfiguration = Field( + default_factory=( + lambda: OneShotAgentPromptStrategy.default_configuration.copy(deep=True) + ) + ) + + +class Agent( + ContextMixin, + AgentFileManagerMixin, + WatchdogMixin, + BaseAgent, + Configurable[AgentSettings], +): + """AutoGPT's primary Agent; uses one-shot prompting.""" + + default_settings: AgentSettings = AgentSettings( + name="Agent", + description=__doc__, + ) + + prompt_strategy: OneShotAgentPromptStrategy + + def __init__( + self, + settings: AgentSettings, + llm_provider: ChatModelProvider, + command_registry: CommandRegistry, + file_storage: FileStorage, + legacy_config: Config, + ): + prompt_strategy = OneShotAgentPromptStrategy( + configuration=settings.prompt_config, + logger=logger, + ) + super().__init__( + settings=settings, + llm_provider=llm_provider, + prompt_strategy=prompt_strategy, + command_registry=command_registry, + file_storage=file_storage, + legacy_config=legacy_config, + ) + + self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S") + """Timestamp the agent was created; only used for structured debug logging.""" + + self.log_cycle_handler = LogCycleHandler() + """LogCycleHandler for structured debug logging.""" + + def build_prompt( + self, + *args, + extra_messages: Optional[list[ChatMessage]] = None, + include_os_info: Optional[bool] = None, + **kwargs, + ) -> ChatPrompt: + if not extra_messages: + extra_messages = [] + + # Clock + extra_messages.append( + ChatMessage.system(f"The current time and date is {time.strftime('%c')}"), + ) + + if include_os_info is None: + include_os_info = self.legacy_config.execute_local_commands + + return super().build_prompt( + *args, + extra_messages=extra_messages, + include_os_info=include_os_info, + **kwargs, + ) + + def on_before_think(self, *args, **kwargs) -> ChatPrompt: + prompt = super().on_before_think(*args, **kwargs) + + self.log_cycle_handler.log_count_within_cycle = 0 + 
self.log_cycle_handler.log_cycle( + self.ai_profile.ai_name, + self.created_at, + self.config.cycle_count, + prompt.raw(), + CURRENT_CONTEXT_FILE_NAME, + ) + return prompt + + def parse_and_process_response( + self, llm_response: AssistantChatMessage, *args, **kwargs + ) -> Agent.ThoughtProcessOutput: + for plugin in self.config.plugins: + if not plugin.can_handle_post_planning(): + continue + llm_response.content = plugin.post_planning(llm_response.content or "") + + ( + command_name, + arguments, + assistant_reply_dict, + ) = self.prompt_strategy.parse_response_content(llm_response) + + # Check if command_name and arguments are already in the event_history + if self.event_history.matches_last_command(command_name, arguments): + raise DuplicateOperationError( + f"The command {command_name} with arguments {arguments} " + f"has been just executed." + ) + + self.log_cycle_handler.log_cycle( + self.ai_profile.ai_name, + self.created_at, + self.config.cycle_count, + assistant_reply_dict, + NEXT_ACTION_FILE_NAME, + ) + + if command_name: + self.event_history.register_action( + Action( + name=command_name, + args=arguments, + reasoning=assistant_reply_dict["thoughts"]["reasoning"], + ) + ) + + return command_name, arguments, assistant_reply_dict + + async def execute( + self, + command_name: str, + command_args: dict[str, str] = {}, + user_input: str = "", + ) -> ActionResult: + result: ActionResult + + if command_name == "human_feedback": + result = ActionInterruptedByHuman(feedback=user_input) + self.log_cycle_handler.log_cycle( + self.ai_profile.ai_name, + self.created_at, + self.config.cycle_count, + user_input, + USER_INPUT_FILE_NAME, + ) + + else: + for plugin in self.config.plugins: + if not plugin.can_handle_pre_command(): + continue + command_name, command_args = plugin.pre_command( + command_name, command_args + ) + + try: + return_value = await execute_command( + command_name=command_name, + arguments=command_args, + agent=self, + ) + + # Intercept ContextItem 
if one is returned by the command + if type(return_value) is tuple and isinstance( + return_value[1], ContextItem + ): + context_item = return_value[1] + return_value = return_value[0] + logger.debug( + f"Command {command_name} returned a ContextItem: {context_item}" + ) + self.context.add(context_item) + + result = ActionSuccessResult(outputs=return_value) + except AgentTerminated: + raise + except AgentException as e: + result = ActionErrorResult.from_exception(e) + logger.warning( + f"{command_name}({fmt_kwargs(command_args)}) raised an error: {e}" + ) + sentry_sdk.capture_exception(e) + + result_tlength = self.llm_provider.count_tokens(str(result), self.llm.name) + if result_tlength > self.send_token_limit // 3: + result = ActionErrorResult( + reason=f"Command {command_name} returned too much output. " + "Do not execute this command again with the same arguments." + ) + + for plugin in self.config.plugins: + if not plugin.can_handle_post_command(): + continue + if result.status == "success": + result.outputs = plugin.post_command(command_name, result.outputs) + elif result.status == "error": + result.reason = plugin.post_command(command_name, result.reason) + + # Update action history + self.event_history.register_result(result) + await self.event_history.handle_compression( + self.llm_provider, self.legacy_config + ) + + return result + + +############# +# Utilities # +############# + + +async def execute_command( + command_name: str, + arguments: dict[str, str], + agent: Agent, +) -> CommandOutput: + """Execute the command and return the result + + Args: + command_name (str): The name of the command to execute + arguments (dict): The arguments for the command + agent (Agent): The agent that is executing the command + + Returns: + str: The result of the command + """ + # Execute a native command with the same name or alias, if it exists + if command := agent.command_registry.get_command(command_name): + try: + result = command(**arguments, agent=agent) + if 
inspect.isawaitable(result): + return await result + return result + except AgentException: + raise + except Exception as e: + raise CommandExecutionError(str(e)) + + # Handle non-native commands (e.g. from plugins) + if agent._prompt_scratchpad: + for name, command in agent._prompt_scratchpad.commands.items(): + if ( + command_name == name + or command_name.lower() == command.description.lower() + ): + try: + return command.method(**arguments) + except AgentException: + raise + except Exception as e: + raise CommandExecutionError(str(e)) + + raise UnknownCommandError( + f"Cannot execute command '{command_name}': unknown command." + ) diff --git a/autogpts/autogpt/autogpt/agents/base.py b/autogpts/autogpt/autogpt/agents/base.py new file mode 100644 index 000000000000..774070e8c6e9 --- /dev/null +++ b/autogpts/autogpt/autogpt/agents/base.py @@ -0,0 +1,396 @@ +from __future__ import annotations + +import logging +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any, Optional + +from auto_gpt_plugin_template import AutoGPTPluginTemplate +from pydantic import Field, validator + +if TYPE_CHECKING: + from autogpt.config import Config + from autogpt.core.prompting.base import PromptStrategy + from autogpt.core.resource.model_providers.schema import ( + AssistantChatMessage, + ChatModelInfo, + ChatModelProvider, + ChatModelResponse, + ) + from autogpt.models.command_registry import CommandRegistry + +from autogpt.agents.utils.prompt_scratchpad import PromptScratchpad +from autogpt.config import ConfigBuilder +from autogpt.config.ai_directives import AIDirectives +from autogpt.config.ai_profile import AIProfile +from autogpt.core.configuration import ( + Configurable, + SystemConfiguration, + SystemSettings, + UserConfigurable, +) +from autogpt.core.prompting.schema import ( + ChatMessage, + ChatPrompt, + CompletionModelFunction, +) +from autogpt.core.resource.model_providers.openai import ( + OPEN_AI_CHAT_MODELS, + OpenAIModelName, +) +from 
autogpt.core.runner.client_lib.logging.helpers import dump_prompt +from autogpt.file_storage.base import FileStorage +from autogpt.llm.providers.openai import get_openai_command_specs +from autogpt.models.action_history import ActionResult, EpisodicActionHistory +from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT + +logger = logging.getLogger(__name__) + +CommandName = str +CommandArgs = dict[str, str] +AgentThoughts = dict[str, Any] + + +class BaseAgentConfiguration(SystemConfiguration): + allow_fs_access: bool = UserConfigurable(default=False) + + fast_llm: OpenAIModelName = UserConfigurable(default=OpenAIModelName.GPT3_16k) + smart_llm: OpenAIModelName = UserConfigurable(default=OpenAIModelName.GPT4) + use_functions_api: bool = UserConfigurable(default=False) + + default_cycle_instruction: str = DEFAULT_TRIGGERING_PROMPT + """The default instruction passed to the AI for a thinking cycle.""" + + big_brain: bool = UserConfigurable(default=True) + """ + Whether this agent uses the configured smart LLM (default) to think, + as opposed to the configured fast LLM. Enabling this disables hybrid mode. + """ + + cycle_budget: Optional[int] = 1 + """ + The number of cycles that the agent is allowed to run unsupervised. + + `None` for unlimited continuous execution, + `1` to require user approval for every step, + `0` to stop the agent. + """ + + cycles_remaining = cycle_budget + """The number of cycles remaining within the `cycle_budget`.""" + + cycle_count = 0 + """The number of cycles that the agent has run since its initialization.""" + + send_token_limit: Optional[int] = None + """ + The token limit for prompt construction. Should leave room for the completion; + defaults to 75% of `llm.max_tokens`. 
+ """ + + summary_max_tlength: Optional[int] = None + # TODO: move to ActionHistoryConfiguration + + plugins: list[AutoGPTPluginTemplate] = Field(default_factory=list, exclude=True) + + class Config: + arbitrary_types_allowed = True # Necessary for plugins + + @validator("plugins", each_item=True) + def validate_plugins(cls, p: AutoGPTPluginTemplate | Any): + assert issubclass( + p.__class__, AutoGPTPluginTemplate + ), f"{p} does not subclass AutoGPTPluginTemplate" + assert ( + p.__class__.__name__ != "AutoGPTPluginTemplate" + ), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance" + return p + + @validator("use_functions_api") + def validate_openai_functions(cls, v: bool, values: dict[str, Any]): + if v: + smart_llm = values["smart_llm"] + fast_llm = values["fast_llm"] + assert all( + [ + not any(s in name for s in {"-0301", "-0314"}) + for name in {smart_llm, fast_llm} + ] + ), ( + f"Model {smart_llm} does not support OpenAI Functions. " + "Please disable OPENAI_FUNCTIONS or choose a suitable model." 
+ ) + return v + + +class BaseAgentSettings(SystemSettings): + agent_id: str = "" + + ai_profile: AIProfile = Field(default_factory=lambda: AIProfile(ai_name="AutoGPT")) + """The AI profile or "personality" of the agent.""" + + directives: AIDirectives = Field( + default_factory=lambda: AIDirectives.from_file( + ConfigBuilder.default_settings.prompt_settings_file + ) + ) + """Directives (general instructional guidelines) for the agent.""" + + task: str = "Terminate immediately" # FIXME: placeholder for forge.sdk.schema.Task + """The user-given task that the agent is working on.""" + + config: BaseAgentConfiguration = Field(default_factory=BaseAgentConfiguration) + """The configuration for this BaseAgent subsystem instance.""" + + history: EpisodicActionHistory = Field(default_factory=EpisodicActionHistory) + """(STATE) The action history of the agent.""" + + +class BaseAgent(Configurable[BaseAgentSettings], ABC): + """Base class for all AutoGPT agent classes.""" + + ThoughtProcessOutput = tuple[CommandName, CommandArgs, AgentThoughts] + + default_settings = BaseAgentSettings( + name="BaseAgent", + description=__doc__, + ) + + def __init__( + self, + settings: BaseAgentSettings, + llm_provider: ChatModelProvider, + prompt_strategy: PromptStrategy, + command_registry: CommandRegistry, + file_storage: FileStorage, + legacy_config: Config, + ): + self.state = settings + self.config = settings.config + self.ai_profile = settings.ai_profile + self.directives = settings.directives + self.event_history = settings.history + + self.legacy_config = legacy_config + """LEGACY: Monolithic application configuration.""" + + self.llm_provider = llm_provider + + self.prompt_strategy = prompt_strategy + + self.command_registry = command_registry + """The registry containing all commands available to the agent.""" + + self._prompt_scratchpad: PromptScratchpad | None = None + + # Support multi-inheritance and mixins for subclasses + super(BaseAgent, self).__init__() + + 
logger.debug(f"Created {__class__} '{self.ai_profile.ai_name}'") + + @property + def llm(self) -> ChatModelInfo: + """The LLM that the agent uses to think.""" + llm_name = ( + self.config.smart_llm if self.config.big_brain else self.config.fast_llm + ) + return OPEN_AI_CHAT_MODELS[llm_name] + + @property + def send_token_limit(self) -> int: + return self.config.send_token_limit or self.llm.max_tokens * 3 // 4 + + async def propose_action(self) -> ThoughtProcessOutput: + """Proposes the next action to execute, based on the task and current state. + + Returns: + The command name and arguments, if any, and the agent's thoughts. + """ + + # Scratchpad as surrogate PromptGenerator for plugin hooks + self._prompt_scratchpad = PromptScratchpad() + + prompt: ChatPrompt = self.build_prompt(scratchpad=self._prompt_scratchpad) + prompt = self.on_before_think(prompt, scratchpad=self._prompt_scratchpad) + + logger.debug(f"Executing prompt:\n{dump_prompt(prompt)}") + response = await self.llm_provider.create_chat_completion( + prompt.messages, + functions=get_openai_command_specs( + self.command_registry.list_available_commands(self) + ) + + list(self._prompt_scratchpad.commands.values()) + if self.config.use_functions_api + else [], + model_name=self.llm.name, + completion_parser=lambda r: self.parse_and_process_response( + r, + prompt, + scratchpad=self._prompt_scratchpad, + ), + ) + self.config.cycle_count += 1 + + return self.on_response( + llm_response=response, + prompt=prompt, + scratchpad=self._prompt_scratchpad, + ) + + @abstractmethod + async def execute( + self, + command_name: str, + command_args: dict[str, str] = {}, + user_input: str = "", + ) -> ActionResult: + """Executes the given command, if any, and returns the agent's response. + + Params: + command_name: The name of the command to execute, if any. + command_args: The arguments to pass to the command, if any. + user_input: The user's input, if any. 
+ + Returns: + ActionResult: An object representing the result(s) of the command. + """ + ... + + def build_prompt( + self, + scratchpad: PromptScratchpad, + extra_commands: Optional[list[CompletionModelFunction]] = None, + extra_messages: Optional[list[ChatMessage]] = None, + **extras, + ) -> ChatPrompt: + """Constructs a prompt using `self.prompt_strategy`. + + Params: + scratchpad: An object for plugins to write additional prompt elements to. + (E.g. commands, constraints, best practices) + extra_commands: Additional commands that the agent has access to. + extra_messages: Additional messages to include in the prompt. + """ + if not extra_commands: + extra_commands = [] + if not extra_messages: + extra_messages = [] + + # Apply additions from plugins + for plugin in self.config.plugins: + if not plugin.can_handle_post_prompt(): + continue + plugin.post_prompt(scratchpad) + ai_directives = self.directives.copy(deep=True) + ai_directives.resources += scratchpad.resources + ai_directives.constraints += scratchpad.constraints + ai_directives.best_practices += scratchpad.best_practices + extra_commands += list(scratchpad.commands.values()) + + prompt = self.prompt_strategy.build_prompt( + task=self.state.task, + ai_profile=self.ai_profile, + ai_directives=ai_directives, + commands=get_openai_command_specs( + self.command_registry.list_available_commands(self) + ) + + extra_commands, + event_history=self.event_history, + max_prompt_tokens=self.send_token_limit, + count_tokens=lambda x: self.llm_provider.count_tokens(x, self.llm.name), + count_message_tokens=lambda x: self.llm_provider.count_message_tokens( + x, self.llm.name + ), + extra_messages=extra_messages, + **extras, + ) + + return prompt + + def on_before_think( + self, + prompt: ChatPrompt, + scratchpad: PromptScratchpad, + ) -> ChatPrompt: + """Called after constructing the prompt but before executing it. 
+ + Calls the `on_planning` hook of any enabled and capable plugins, adding their + output to the prompt. + + Params: + prompt: The prompt that is about to be executed. + scratchpad: An object for plugins to write additional prompt elements to. + (E.g. commands, constraints, best practices) + + Returns: + The prompt to execute + """ + current_tokens_used = self.llm_provider.count_message_tokens( + prompt.messages, self.llm.name + ) + plugin_count = len(self.config.plugins) + for i, plugin in enumerate(self.config.plugins): + if not plugin.can_handle_on_planning(): + continue + plugin_response = plugin.on_planning(scratchpad, prompt.raw()) + if not plugin_response or plugin_response == "": + continue + message_to_add = ChatMessage.system(plugin_response) + tokens_to_add = self.llm_provider.count_message_tokens( + message_to_add, self.llm.name + ) + if current_tokens_used + tokens_to_add > self.send_token_limit: + logger.debug(f"Plugin response too long, skipping: {plugin_response}") + logger.debug(f"Plugins remaining at stop: {plugin_count - i}") + break + prompt.messages.insert( + -1, message_to_add + ) # HACK: assumes cycle instruction to be at the end + current_tokens_used += tokens_to_add + return prompt + + def on_response( + self, + llm_response: ChatModelResponse, + prompt: ChatPrompt, + scratchpad: PromptScratchpad, + ) -> ThoughtProcessOutput: + """Called upon receiving a response from the chat model. + + Calls `self.parse_and_process_response()`. + + Params: + llm_response: The raw response from the chat model. + prompt: The prompt that was executed. + scratchpad: An object containing additional prompt elements from plugins. + (E.g. commands, constraints, best practices) + + Returns: + The parsed command name and command args, if any, and the agent thoughts. 
+ """ + + return llm_response.parsed_result + + # TODO: update memory/context + + @abstractmethod + def parse_and_process_response( + self, + llm_response: AssistantChatMessage, + prompt: ChatPrompt, + scratchpad: PromptScratchpad, + ) -> ThoughtProcessOutput: + """Validate, parse & process the LLM's response. + + Must be implemented by derivative classes: no base implementation is provided, + since the implementation depends on the role of the derivative Agent. + + Params: + llm_response: The raw response from the chat model. + prompt: The prompt that was executed. + scratchpad: An object containing additional prompt elements from plugins. + (E.g. commands, constraints, best practices) + + Returns: + The parsed command name and command args, if any, and the agent thoughts. + """ + pass diff --git a/autogpts/autogpt/autogpt/agents/features/agent_file_manager.py b/autogpts/autogpt/autogpt/agents/features/agent_file_manager.py new file mode 100644 index 000000000000..80257fbeae5e --- /dev/null +++ b/autogpts/autogpt/autogpt/agents/features/agent_file_manager.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +import logging +from typing import Optional + +from autogpt.file_storage.base import FileStorage + +from ..base import BaseAgent, BaseAgentSettings + +logger = logging.getLogger(__name__) + + +class AgentFileManagerMixin: + """Mixin that adds file manager (e.g. Agent state) + and workspace manager (e.g. Agent output files) support.""" + + files: FileStorage + """Agent-related files, e.g. state, logs. + Use `workspace` to access the agent's workspace files.""" + + workspace: FileStorage + """Workspace that the agent has access to, e.g. for reading/writing files. + Use `files` to access agent-related files, e.g. 
state, logs.""" + + STATE_FILE = "state.json" + """The name of the file where the agent's state is stored.""" + + LOGS_FILE = "file_logger.log" + """The name of the file where the agent's logs are stored.""" + + def __init__(self, **kwargs): + # Initialize other bases first, because we need the config from BaseAgent + super(AgentFileManagerMixin, self).__init__(**kwargs) + + if not isinstance(self, BaseAgent): + raise NotImplementedError( + f"{__class__.__name__} can only be applied to BaseAgent derivatives" + ) + + if "file_storage" not in kwargs: + raise ValueError( + "AgentFileManagerMixin requires a file_storage in the constructor." + ) + + state: BaseAgentSettings = getattr(self, "state") + if not state.agent_id: + raise ValueError("Agent must have an ID.") + + file_storage: FileStorage = kwargs["file_storage"] + self.files = file_storage.clone_with_subroot(f"agents/{state.agent_id}/") + self.workspace = file_storage.clone_with_subroot( + f"agents/{state.agent_id}/workspace" + ) + self._file_storage = file_storage + # Read and cache logs + self._file_logs_cache = [] + if self.files.exists(self.LOGS_FILE): + self._file_logs_cache = self.files.read_file(self.LOGS_FILE).split("\n") + + async def log_file_operation(self, content: str) -> None: + """Log a file operation to the agent's log file.""" + logger.debug(f"Logging operation: {content}") + self._file_logs_cache.append(content) + await self.files.write_file( + self.LOGS_FILE, "\n".join(self._file_logs_cache) + "\n" + ) + + def get_file_operation_lines(self) -> list[str]: + """Get the agent's file operation logs as list of strings.""" + return self._file_logs_cache + + async def save_state(self, save_as: Optional[str] = None) -> None: + """Save the agent's state to the state file.""" + state: BaseAgentSettings = getattr(self, "state") + if save_as: + temp_id = state.agent_id + state.agent_id = save_as + self._file_storage.make_dir(f"agents/{save_as}") + # Save state + await self._file_storage.write_file( + 
f"agents/{save_as}/{self.STATE_FILE}", state.json() + ) + # Copy workspace + self._file_storage.copy( + f"agents/{temp_id}/workspace", + f"agents/{save_as}/workspace", + ) + state.agent_id = temp_id + else: + await self.files.write_file(self.files.root / self.STATE_FILE, state.json()) + + def change_agent_id(self, new_id: str): + """Change the agent's ID and update the file storage accordingly.""" + state: BaseAgentSettings = getattr(self, "state") + # Rename the agent's files and workspace + self._file_storage.rename(f"agents/{state.agent_id}", f"agents/{new_id}") + # Update the file storage objects + self.files = self._file_storage.clone_with_subroot(f"agents/{new_id}/") + self.workspace = self._file_storage.clone_with_subroot( + f"agents/{new_id}/workspace" + ) + state.agent_id = new_id diff --git a/autogpts/autogpt/autogpt/agents/features/context.py b/autogpts/autogpt/autogpt/agents/features/context.py new file mode 100644 index 000000000000..748635ddc110 --- /dev/null +++ b/autogpts/autogpt/autogpt/agents/features/context.py @@ -0,0 +1,82 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Optional + +if TYPE_CHECKING: + from autogpt.core.prompting import ChatPrompt + from autogpt.models.context_item import ContextItem + + from ..base import BaseAgent + +from autogpt.core.resource.model_providers import ChatMessage + + +class AgentContext: + items: list[ContextItem] + + def __init__(self, items: Optional[list[ContextItem]] = None): + self.items = items or [] + + def __bool__(self) -> bool: + return len(self.items) > 0 + + def __contains__(self, item: ContextItem) -> bool: + return any([i.source == item.source for i in self.items]) + + def add(self, item: ContextItem) -> None: + self.items.append(item) + + def close(self, index: int) -> None: + self.items.pop(index - 1) + + def clear(self) -> None: + self.items.clear() + + def format_numbered(self) -> str: + return "\n\n".join([f"{i}. 
{c.fmt()}" for i, c in enumerate(self.items, 1)]) + + +class ContextMixin: + """Mixin that adds context support to a BaseAgent subclass""" + + context: AgentContext + + def __init__(self, **kwargs: Any): + self.context = AgentContext() + + super(ContextMixin, self).__init__(**kwargs) + + def build_prompt( + self, + *args: Any, + extra_messages: Optional[list[ChatMessage]] = None, + **kwargs: Any, + ) -> ChatPrompt: + if not extra_messages: + extra_messages = [] + + # Add context section to prompt + if self.context: + extra_messages.insert( + 0, + ChatMessage.system( + "## Context\n" + f"{self.context.format_numbered()}\n\n" + "When a context item is no longer needed and you are not done yet, " + "you can hide the item by specifying its number in the list above " + "to `hide_context_item`.", + ), + ) + + return super(ContextMixin, self).build_prompt( + *args, + extra_messages=extra_messages, + **kwargs, + ) # type: ignore + + +def get_agent_context(agent: BaseAgent) -> AgentContext | None: + if isinstance(agent, ContextMixin): + return agent.context + + return None diff --git a/autogpts/autogpt/autogpt/agents/features/watchdog.py b/autogpts/autogpt/autogpt/agents/features/watchdog.py new file mode 100644 index 000000000000..8113def25c1f --- /dev/null +++ b/autogpts/autogpt/autogpt/agents/features/watchdog.py @@ -0,0 +1,76 @@ +from __future__ import annotations + +import logging +from contextlib import ExitStack +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ..base import BaseAgentConfiguration + +from autogpt.models.action_history import EpisodicActionHistory + +from ..base import BaseAgent + +logger = logging.getLogger(__name__) + + +class WatchdogMixin: + """ + Mixin that adds a watchdog feature to an agent class. Whenever the agent starts + looping, the watchdog will switch from the FAST_LLM to the SMART_LLM and re-think. 
+ """ + + config: BaseAgentConfiguration + event_history: EpisodicActionHistory + + def __init__(self, **kwargs) -> None: + # Initialize other bases first, because we need the event_history from BaseAgent + super(WatchdogMixin, self).__init__(**kwargs) + + if not isinstance(self, BaseAgent): + raise NotImplementedError( + f"{__class__.__name__} can only be applied to BaseAgent derivatives" + ) + + async def propose_action(self, *args, **kwargs) -> BaseAgent.ThoughtProcessOutput: + command_name, command_args, thoughts = await super( + WatchdogMixin, self + ).propose_action(*args, **kwargs) + + if not self.config.big_brain and self.config.fast_llm != self.config.smart_llm: + previous_command, previous_command_args = None, None + if len(self.event_history) > 1: + # Detect repetitive commands + previous_cycle = self.event_history.episodes[ + self.event_history.cursor - 1 + ] + previous_command = previous_cycle.action.name + previous_command_args = previous_cycle.action.args + + rethink_reason = "" + + if not command_name: + rethink_reason = "AI did not specify a command" + elif ( + command_name == previous_command + and command_args == previous_command_args + ): + rethink_reason = f"Repititive command detected ({command_name})" + + if rethink_reason: + logger.info(f"{rethink_reason}, re-thinking with SMART_LLM...") + with ExitStack() as stack: + + @stack.callback + def restore_state() -> None: + # Executed after exiting the ExitStack context + self.config.big_brain = False + + # Remove partial record of current cycle + self.event_history.rewind() + + # Switch to SMART_LLM and re-think + self.big_brain = True + return await self.propose_action(*args, **kwargs) + + return command_name, command_args, thoughts diff --git a/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py b/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py new file mode 100644 index 000000000000..28502349d424 --- /dev/null +++ 
b/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py @@ -0,0 +1,475 @@ +from __future__ import annotations + +import json +import platform +import re +from logging import Logger +from typing import TYPE_CHECKING, Callable, Optional + +import distro + +if TYPE_CHECKING: + from autogpt.agents.agent import Agent + from autogpt.models.action_history import Episode + +from autogpt.agents.utils.exceptions import InvalidAgentResponseError +from autogpt.config import AIDirectives, AIProfile +from autogpt.core.configuration.schema import SystemConfiguration, UserConfigurable +from autogpt.core.prompting import ( + ChatPrompt, + LanguageModelClassification, + PromptStrategy, +) +from autogpt.core.resource.model_providers.schema import ( + AssistantChatMessage, + ChatMessage, + CompletionModelFunction, +) +from autogpt.core.utils.json_schema import JSONSchema +from autogpt.core.utils.json_utils import extract_dict_from_json +from autogpt.prompts.utils import format_numbered_list, indent + + +class OneShotAgentPromptConfiguration(SystemConfiguration): + DEFAULT_BODY_TEMPLATE: str = ( + "## Constraints\n" + "You operate within the following constraints:\n" + "{constraints}\n" + "\n" + "## Resources\n" + "You can leverage access to the following resources:\n" + "{resources}\n" + "\n" + "## Commands\n" + "These are the ONLY commands you can use." 
+ " Any action you perform must be possible through one of these commands:\n" + "{commands}\n" + "\n" + "## Best practices\n" + "{best_practices}" + ) + + DEFAULT_CHOOSE_ACTION_INSTRUCTION: str = ( + "Determine exactly one command to use next based on the given goals " + "and the progress you have made so far, " + "and respond using the JSON schema specified previously:" + ) + + DEFAULT_RESPONSE_SCHEMA = JSONSchema( + type=JSONSchema.Type.OBJECT, + properties={ + "thoughts": JSONSchema( + type=JSONSchema.Type.OBJECT, + required=True, + properties={ + "observations": JSONSchema( + description=( + "Relevant observations from your last action (if any)" + ), + type=JSONSchema.Type.STRING, + required=False, + ), + "text": JSONSchema( + description="Thoughts", + type=JSONSchema.Type.STRING, + required=True, + ), + "reasoning": JSONSchema( + type=JSONSchema.Type.STRING, + required=True, + ), + "self_criticism": JSONSchema( + description="Constructive self-criticism", + type=JSONSchema.Type.STRING, + required=True, + ), + "plan": JSONSchema( + description=( + "Short markdown-style bullet list that conveys the " + "long-term plan" + ), + type=JSONSchema.Type.STRING, + required=True, + ), + "speak": JSONSchema( + description="Summary of thoughts, to say to user", + type=JSONSchema.Type.STRING, + required=True, + ), + }, + ), + "command": JSONSchema( + type=JSONSchema.Type.OBJECT, + required=True, + properties={ + "name": JSONSchema( + type=JSONSchema.Type.STRING, + required=True, + ), + "args": JSONSchema( + type=JSONSchema.Type.OBJECT, + required=True, + ), + }, + ), + }, + ) + + body_template: str = UserConfigurable(default=DEFAULT_BODY_TEMPLATE) + response_schema: dict = UserConfigurable( + default_factory=DEFAULT_RESPONSE_SCHEMA.to_dict + ) + choose_action_instruction: str = UserConfigurable( + default=DEFAULT_CHOOSE_ACTION_INSTRUCTION + ) + use_functions_api: bool = UserConfigurable(default=False) + + ######### + # State # + ######### + # progress_summaries: 
dict[tuple[int, int], str] = Field( + # default_factory=lambda: {(0, 0): ""} + # ) + + +class OneShotAgentPromptStrategy(PromptStrategy): + default_configuration: OneShotAgentPromptConfiguration = ( + OneShotAgentPromptConfiguration() + ) + + def __init__( + self, + configuration: OneShotAgentPromptConfiguration, + logger: Logger, + ): + self.config = configuration + self.response_schema = JSONSchema.from_dict(configuration.response_schema) + self.logger = logger + + @property + def model_classification(self) -> LanguageModelClassification: + return LanguageModelClassification.FAST_MODEL # FIXME: dynamic switching + + def build_prompt( + self, + *, + task: str, + ai_profile: AIProfile, + ai_directives: AIDirectives, + commands: list[CompletionModelFunction], + event_history: list[Episode], + include_os_info: bool, + max_prompt_tokens: int, + count_tokens: Callable[[str], int], + count_message_tokens: Callable[[ChatMessage | list[ChatMessage]], int], + extra_messages: Optional[list[ChatMessage]] = None, + **extras, + ) -> ChatPrompt: + """Constructs and returns a prompt with the following structure: + 1. System prompt + 2. Message history of the agent, truncated & prepended with running summary + as needed + 3. 
`cycle_instruction` + """ + if not extra_messages: + extra_messages = [] + + system_prompt = self.build_system_prompt( + ai_profile=ai_profile, + ai_directives=ai_directives, + commands=commands, + include_os_info=include_os_info, + ) + system_prompt_tlength = count_message_tokens(ChatMessage.system(system_prompt)) + + user_task = f'"""{task}"""' + user_task_tlength = count_message_tokens(ChatMessage.user(user_task)) + + response_format_instr = self.response_format_instruction( + self.config.use_functions_api + ) + extra_messages.append(ChatMessage.system(response_format_instr)) + + final_instruction_msg = ChatMessage.user(self.config.choose_action_instruction) + final_instruction_tlength = count_message_tokens(final_instruction_msg) + + if event_history: + progress = self.compile_progress( + event_history, + count_tokens=count_tokens, + max_tokens=( + max_prompt_tokens + - system_prompt_tlength + - user_task_tlength + - final_instruction_tlength + - count_message_tokens(extra_messages) + ), + ) + extra_messages.insert( + 0, + ChatMessage.system(f"## Progress\n\n{progress}"), + ) + + prompt = ChatPrompt( + messages=[ + ChatMessage.system(system_prompt), + ChatMessage.user(user_task), + *extra_messages, + final_instruction_msg, + ], + ) + + return prompt + + def build_system_prompt( + self, + ai_profile: AIProfile, + ai_directives: AIDirectives, + commands: list[CompletionModelFunction], + include_os_info: bool, + ) -> str: + system_prompt_parts = ( + self._generate_intro_prompt(ai_profile) + + (self._generate_os_info() if include_os_info else []) + + [ + self.config.body_template.format( + constraints=format_numbered_list( + ai_directives.constraints + + self._generate_budget_constraint(ai_profile.api_budget) + ), + resources=format_numbered_list(ai_directives.resources), + commands=self._generate_commands_list(commands), + best_practices=format_numbered_list(ai_directives.best_practices), + ) + ] + + [ + "## Your Task\n" + "The user will specify a task for you to 
execute, in triple quotes," + " in the next message. Your job is to complete the task while following" + " your directives as given above, and terminate when your task is done." + ] + ) + + # Join non-empty parts together into paragraph format + return "\n\n".join(filter(None, system_prompt_parts)).strip("\n") + + def compile_progress( + self, + episode_history: list[Episode], + max_tokens: Optional[int] = None, + count_tokens: Optional[Callable[[str], int]] = None, + ) -> str: + if max_tokens and not count_tokens: + raise ValueError("count_tokens is required if max_tokens is set") + + steps: list[str] = [] + tokens: int = 0 + n_episodes = len(episode_history) + + for i, episode in enumerate(reversed(episode_history)): + # Use full format for the latest 4 steps, summary or format for older steps + if i < 4 or episode.summary is None: + step_content = indent(episode.format(), 2).strip() + else: + step_content = episode.summary + + step = f"* Step {n_episodes - i}: {step_content}" + + if max_tokens and count_tokens: + step_tokens = count_tokens(step) + if tokens + step_tokens > max_tokens: + break + tokens += step_tokens + + steps.insert(0, step) + + return "\n\n".join(steps) + + def response_format_instruction(self, use_functions_api: bool) -> str: + response_schema = self.response_schema.copy(deep=True) + if ( + use_functions_api + and response_schema.properties + and "command" in response_schema.properties + ): + del response_schema.properties["command"] + + # Unindent for performance + response_format = re.sub( + r"\n\s+", + "\n", + response_schema.to_typescript_object_interface("Response"), + ) + + instruction = ( + "Respond with pure JSON containing your thoughts, " "and invoke a tool." + if use_functions_api + else "Respond with pure JSON." 
+ ) + + return ( + f"{instruction} " + "The JSON object should be compatible with the TypeScript type `Response` " + f"from the following:\n{response_format}" + ) + + def _generate_intro_prompt(self, ai_profile: AIProfile) -> list[str]: + """Generates the introduction part of the prompt. + + Returns: + list[str]: A list of strings forming the introduction part of the prompt. + """ + return [ + f"You are {ai_profile.ai_name}, {ai_profile.ai_role.rstrip('.')}.", + "Your decisions must always be made independently without seeking " + "user assistance. Play to your strengths as an LLM and pursue " + "simple strategies with no legal complications.", + ] + + def _generate_os_info(self) -> list[str]: + """Generates the OS information part of the prompt. + + Params: + config (Config): The configuration object. + + Returns: + str: The OS information part of the prompt. + """ + os_name = platform.system() + os_info = ( + platform.platform(terse=True) + if os_name != "Linux" + else distro.name(pretty=True) + ) + return [f"The OS you are running on is: {os_info}"] + + def _generate_budget_constraint(self, api_budget: float) -> list[str]: + """Generates the budget information part of the prompt. + + Returns: + list[str]: The budget information part of the prompt, or an empty list. + """ + if api_budget > 0.0: + return [ + f"It takes money to let you run. " + f"Your API budget is ${api_budget:.3f}" + ] + return [] + + def _generate_commands_list(self, commands: list[CompletionModelFunction]) -> str: + """Lists the commands available to the agent. + + Params: + agent: The agent for which the commands are being listed. + + Returns: + str: A string containing a numbered list of commands. + """ + try: + return format_numbered_list([cmd.fmt_line() for cmd in commands]) + except AttributeError: + self.logger.warning(f"Formatting commands failed. 
{commands}") + raise + + def parse_response_content( + self, + response: AssistantChatMessage, + ) -> Agent.ThoughtProcessOutput: + if not response.content: + raise InvalidAgentResponseError("Assistant response has no text content") + + self.logger.debug( + "LLM response content:" + + ( + f"\n{response.content}" + if "\n" in response.content + else f" '{response.content}'" + ) + ) + assistant_reply_dict = extract_dict_from_json(response.content) + self.logger.debug( + "Validating object extracted from LLM response:\n" + f"{json.dumps(assistant_reply_dict, indent=4)}" + ) + + response_schema = self.response_schema.copy(deep=True) + if ( + self.config.use_functions_api + and response_schema.properties + and "command" in response_schema.properties + ): + del response_schema.properties["command"] + _, errors = response_schema.validate_object(assistant_reply_dict) + if errors: + raise InvalidAgentResponseError( + "Validation of response failed:\n " + + ";\n ".join([str(e) for e in errors]) + ) + + # Get command name and arguments + command_name, arguments = extract_command( + assistant_reply_dict, response, self.config.use_functions_api + ) + return command_name, arguments, assistant_reply_dict + + +############# +# Utilities # +############# + + +def extract_command( + assistant_reply_json: dict, + assistant_reply: AssistantChatMessage, + use_openai_functions_api: bool, +) -> tuple[str, dict[str, str]]: + """Parse the response and return the command name and arguments + + Args: + assistant_reply_json (dict): The response object from the AI + assistant_reply (AssistantChatMessage): The model response from the AI + config (Config): The config object + + Returns: + tuple: The command name and arguments + + Raises: + json.decoder.JSONDecodeError: If the response is not valid JSON + + Exception: If any other error occurs + """ + if use_openai_functions_api: + if not assistant_reply.tool_calls: + raise InvalidAgentResponseError("Assistant did not use any tools") + 
assistant_reply_json["command"] = { + "name": assistant_reply.tool_calls[0].function.name, + "args": assistant_reply.tool_calls[0].function.arguments, + } + try: + if not isinstance(assistant_reply_json, dict): + raise InvalidAgentResponseError( + f"The previous message sent was not a dictionary {assistant_reply_json}" + ) + + if "command" not in assistant_reply_json: + raise InvalidAgentResponseError("Missing 'command' object in JSON") + + command = assistant_reply_json["command"] + if not isinstance(command, dict): + raise InvalidAgentResponseError("'command' object is not a dictionary") + + if "name" not in command: + raise InvalidAgentResponseError("Missing 'name' field in 'command' object") + + command_name = command["name"] + + # Use an empty dictionary if 'args' field is not present in 'command' object + arguments = command.get("args", {}) + + return command_name, arguments + + except json.decoder.JSONDecodeError: + raise InvalidAgentResponseError("Invalid JSON") + + except Exception as e: + raise InvalidAgentResponseError(str(e)) diff --git a/autogpts/autogpt/autogpt/agents/utils/exceptions.py b/autogpts/autogpt/autogpt/agents/utils/exceptions.py new file mode 100644 index 000000000000..211c8443c007 --- /dev/null +++ b/autogpts/autogpt/autogpt/agents/utils/exceptions.py @@ -0,0 +1,64 @@ +from typing import Optional + + +class AgentException(Exception): + """Base class for specific exceptions relevant in the execution of Agents""" + + message: str + + hint: Optional[str] = None + """A hint which can be passed to the LLM to reduce reoccurrence of this error""" + + def __init__(self, message: str, *args): + self.message = message + super().__init__(message, *args) + + +class AgentTerminated(AgentException): + """The agent terminated or was terminated""" + + +class AgentFinished(AgentTerminated): + """The agent self-terminated""" + + +class ConfigurationError(AgentException): + """Error caused by invalid, incompatible or otherwise incorrect configuration""" + + 
+class InvalidAgentResponseError(AgentException): + """The LLM deviated from the prescribed response format""" + + +class UnknownCommandError(AgentException): + """The AI tried to use an unknown command""" + + hint = "Do not try to use this command again." + + +class DuplicateOperationError(AgentException): + """The proposed operation has already been executed""" + + +class CommandExecutionError(AgentException): + """An error occurred when trying to execute the command""" + + +class InvalidArgumentError(CommandExecutionError): + """The command received an invalid argument""" + + +class OperationNotAllowedError(CommandExecutionError): + """The agent is not allowed to execute the proposed operation""" + + +class AccessDeniedError(CommandExecutionError): + """The operation failed because access to a required resource was denied""" + + +class CodeExecutionError(CommandExecutionError): + """The operation (an attempt to run arbitrary code) returned an error""" + + +class TooMuchOutputError(CommandExecutionError): + """The operation generated more output than what the Agent can process""" diff --git a/autogpts/autogpt/autogpt/agents/utils/prompt_scratchpad.py b/autogpts/autogpt/autogpt/agents/utils/prompt_scratchpad.py new file mode 100644 index 000000000000..2244933383e8 --- /dev/null +++ b/autogpts/autogpt/autogpt/agents/utils/prompt_scratchpad.py @@ -0,0 +1,108 @@ +import logging +from typing import Callable + +from pydantic import BaseModel, Field + +from autogpt.core.resource.model_providers.schema import CompletionModelFunction +from autogpt.core.utils.json_schema import JSONSchema + +logger = logging.getLogger("PromptScratchpad") + + +class CallableCompletionModelFunction(CompletionModelFunction): + method: Callable + + +class PromptScratchpad(BaseModel): + commands: dict[str, CallableCompletionModelFunction] = Field(default_factory=dict) + resources: list[str] = Field(default_factory=list) + constraints: list[str] = Field(default_factory=list) + best_practices: 
list[str] = Field(default_factory=list) + + def add_constraint(self, constraint: str) -> None: + """ + Add a constraint to the constraints list. + + Params: + constraint (str): The constraint to be added. + """ + if constraint not in self.constraints: + self.constraints.append(constraint) + + def add_command( + self, + name: str, + description: str, + params: dict[str, str | dict], + function: Callable, + ) -> None: + """ + Registers a command. + + *Should only be used by plugins.* Native commands should be added + directly to the CommandRegistry. + + Params: + name (str): The name of the command (e.g. `command_name`). + description (str): The description of the command. + params (dict, optional): A dictionary containing argument names and their + types. Defaults to an empty dictionary. + function (callable, optional): A callable function to be called when + the command is executed. Defaults to None. + """ + for p, s in params.items(): + invalid = False + if type(s) is str and s not in JSONSchema.Type._value2member_map_: + invalid = True + logger.warning( + f"Cannot add command '{name}':" + f" parameter '{p}' has invalid type '{s}'." + f" Valid types are: {JSONSchema.Type._value2member_map_.keys()}" + ) + elif isinstance(s, dict): + try: + JSONSchema.from_dict(s) + except KeyError: + invalid = True + if invalid: + return + + command = CallableCompletionModelFunction( + name=name, + description=description, + parameters={ + name: JSONSchema(type=JSONSchema.Type._value2member_map_[spec]) + if type(spec) is str + else JSONSchema.from_dict(spec) + for name, spec in params.items() + }, + method=function, + ) + + if name in self.commands: + if description == self.commands[name].description: + return + logger.warning( + f"Replacing command {self.commands[name]} with conflicting {command}" + ) + self.commands[name] = command + + def add_resource(self, resource: str) -> None: + """ + Add a resource to the resources list. 
+ + Params: + resource (str): The resource to be added. + """ + if resource not in self.resources: + self.resources.append(resource) + + def add_best_practice(self, best_practice: str) -> None: + """ + Add an item to the list of best practices. + + Params: + best_practice (str): The best practice item to be added. + """ + if best_practice not in self.best_practices: + self.best_practices.append(best_practice) diff --git a/autogpts/autogpt/autogpt/app/__init__.py b/autogpts/autogpt/autogpt/app/__init__.py new file mode 100644 index 000000000000..5f5b20ef2311 --- /dev/null +++ b/autogpts/autogpt/autogpt/app/__init__.py @@ -0,0 +1,6 @@ +from dotenv import load_dotenv + +# Load the users .env file into environment variables +load_dotenv(verbose=True, override=True) + +del load_dotenv diff --git a/autogpts/autogpt/autogpt/app/agent_protocol_server.py b/autogpts/autogpt/autogpt/app/agent_protocol_server.py new file mode 100644 index 000000000000..fe0a3a0ee9d9 --- /dev/null +++ b/autogpts/autogpt/autogpt/app/agent_protocol_server.py @@ -0,0 +1,492 @@ +import logging +import os +import pathlib +from collections import defaultdict +from io import BytesIO +from uuid import uuid4 + +import orjson +from fastapi import APIRouter, FastAPI, UploadFile +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import RedirectResponse, StreamingResponse +from fastapi.staticfiles import StaticFiles +from forge.sdk.db import AgentDB +from forge.sdk.errors import NotFoundError +from forge.sdk.middlewares import AgentMiddleware +from forge.sdk.model import ( + Artifact, + Step, + StepRequestBody, + Task, + TaskArtifactsListResponse, + TaskListResponse, + TaskRequestBody, + TaskStepsListResponse, +) +from forge.sdk.routes.agent_protocol import base_router +from hypercorn.asyncio import serve as hypercorn_serve +from hypercorn.config import Config as HypercornConfig +from sentry_sdk import set_user + +from autogpt.agent_factory.configurators import 
configure_agent_with_state +from autogpt.agent_factory.generators import generate_agent_for_task +from autogpt.agent_manager import AgentManager +from autogpt.agents.utils.exceptions import AgentFinished +from autogpt.app.utils import is_port_free +from autogpt.commands.system import finish +from autogpt.commands.user_interaction import ask_user +from autogpt.config import Config +from autogpt.core.resource.model_providers import ChatModelProvider +from autogpt.core.resource.model_providers.openai import OpenAIProvider +from autogpt.core.resource.model_providers.schema import ModelProviderBudget +from autogpt.file_storage import FileStorage +from autogpt.logs.utils import fmt_kwargs +from autogpt.models.action_history import ActionErrorResult, ActionSuccessResult + +logger = logging.getLogger(__name__) + + +class AgentProtocolServer: + _task_budgets: dict[str, ModelProviderBudget] + + def __init__( + self, + app_config: Config, + database: AgentDB, + file_storage: FileStorage, + llm_provider: ChatModelProvider, + ): + self.app_config = app_config + self.db = database + self.file_storage = file_storage + self.llm_provider = llm_provider + self.agent_manager = AgentManager(file_storage) + self._task_budgets = defaultdict(ModelProviderBudget) + + async def start(self, port: int = 8000, router: APIRouter = base_router): + """Start the agent server.""" + logger.debug("Starting the agent server...") + if not is_port_free(port): + logger.error(f"Port {port} is already in use.") + logger.info( + "You can specify a port by either setting the AP_SERVER_PORT " + "environment variable or defining AP_SERVER_PORT in the .env file." 
+ ) + return + + config = HypercornConfig() + config.bind = [f"localhost:{port}"] + app = FastAPI( + title="AutoGPT Server", + description="Forked from AutoGPT Forge; " + "Modified version of The Agent Protocol.", + version="v0.4", + ) + + # Configure CORS middleware + default_origins = [f"http://localhost:{port}"] # Default only local access + configured_origins = [ + origin + for origin in os.getenv("AP_SERVER_CORS_ALLOWED_ORIGINS", "").split(",") + if origin # Empty list if not configured + ] + origins = configured_origins or default_origins + + app.add_middleware( + CORSMiddleware, + allow_origins=origins, + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + + app.include_router(router, prefix="/ap/v1") + script_dir = os.path.dirname(os.path.realpath(__file__)) + frontend_path = ( + pathlib.Path(script_dir) + .joinpath("../../../../frontend/build/web") + .resolve() + ) + + if os.path.exists(frontend_path): + app.mount("/app", StaticFiles(directory=frontend_path), name="app") + + @app.get("/", include_in_schema=False) + async def root(): + return RedirectResponse(url="/app/index.html", status_code=307) + + else: + logger.warning( + f"Frontend not found. {frontend_path} does not exist. " + "The frontend will not be available." + ) + + # Used to access the methods on this class from API route handlers + app.add_middleware(AgentMiddleware, agent=self) + + config.loglevel = "ERROR" + config.bind = [f"0.0.0.0:{port}"] + + logger.info(f"AutoGPT server starting on http://localhost:{port}") + await hypercorn_serve(app, config) + + async def create_task(self, task_request: TaskRequestBody) -> Task: + """ + Create a task for the agent. 
+ """ + if user_id := (task_request.additional_input or {}).get("user_id"): + set_user({"id": user_id}) + + task = await self.db.create_task( + input=task_request.input, + additional_input=task_request.additional_input, + ) + logger.debug(f"Creating agent for task: '{task.input}'") + task_agent = await generate_agent_for_task( + agent_id=task_agent_id(task.task_id), + task=task.input, + app_config=self.app_config, + file_storage=self.file_storage, + llm_provider=self._get_task_llm_provider(task), + ) + await task_agent.save_state() + + return task + + async def list_tasks(self, page: int = 1, pageSize: int = 10) -> TaskListResponse: + """ + List all tasks that the agent has created. + """ + logger.debug("Listing all tasks...") + tasks, pagination = await self.db.list_tasks(page, pageSize) + response = TaskListResponse(tasks=tasks, pagination=pagination) + return response + + async def get_task(self, task_id: str) -> Task: + """ + Get a task by ID. + """ + logger.debug(f"Getting task with ID: {task_id}...") + task = await self.db.get_task(task_id) + return task + + async def list_steps( + self, task_id: str, page: int = 1, pageSize: int = 10 + ) -> TaskStepsListResponse: + """ + List the IDs of all steps that the task has created. 
+ """ + logger.debug(f"Listing all steps created by task with ID: {task_id}...") + steps, pagination = await self.db.list_steps(task_id, page, pageSize) + response = TaskStepsListResponse(steps=steps, pagination=pagination) + return response + + async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step: + """Create a step for the task.""" + logger.debug(f"Creating a step for task with ID: {task_id}...") + + # Restore Agent instance + task = await self.get_task(task_id) + agent = configure_agent_with_state( + state=self.agent_manager.load_agent_state(task_agent_id(task_id)), + app_config=self.app_config, + file_storage=self.file_storage, + llm_provider=self._get_task_llm_provider(task), + ) + + if user_id := (task.additional_input or {}).get("user_id"): + set_user({"id": user_id}) + + # According to the Agent Protocol spec, the first execute_step request contains + # the same task input as the parent create_task request. + # To prevent this from interfering with the agent's process, we ignore the input + # of this first step request, and just generate the first step proposal. + is_init_step = not bool(agent.event_history) + execute_command, execute_command_args, execute_result = None, None, None + execute_approved = False + + # HACK: only for compatibility with AGBenchmark + if step_request.input == "y": + step_request.input = "" + + user_input = step_request.input if not is_init_step else "" + + if ( + not is_init_step + and agent.event_history.current_episode + and not agent.event_history.current_episode.result + ): + execute_command = agent.event_history.current_episode.action.name + execute_command_args = agent.event_history.current_episode.action.args + execute_approved = not user_input + + logger.debug( + f"Agent proposed command" + f" {execute_command}({fmt_kwargs(execute_command_args)})." 
+ f" User input/feedback: {repr(user_input)}" + ) + + # Save step request + step = await self.db.create_step( + task_id=task_id, + input=step_request, + is_last=execute_command == finish.__name__ and execute_approved, + ) + agent.llm_provider = self._get_task_llm_provider(task, step.step_id) + + # Execute previously proposed action + if execute_command: + assert execute_command_args is not None + agent.workspace.on_write_file = lambda path: self._on_agent_write_file( + task=task, step=step, relative_path=path + ) + + if execute_command == ask_user.__name__: # HACK + execute_result = ActionSuccessResult(outputs=user_input) + agent.event_history.register_result(execute_result) + elif not execute_command: + execute_result = None + elif execute_approved: + step = await self.db.update_step( + task_id=task_id, + step_id=step.step_id, + status="running", + ) + + try: + # Execute previously proposed action + execute_result = await agent.execute( + command_name=execute_command, + command_args=execute_command_args, + ) + except AgentFinished: + additional_output = {} + task_total_cost = agent.llm_provider.get_incurred_cost() + if task_total_cost > 0: + additional_output["task_total_cost"] = task_total_cost + logger.info( + f"Total LLM cost for task {task_id}: " + f"${round(task_total_cost, 2)}" + ) + + step = await self.db.update_step( + task_id=task_id, + step_id=step.step_id, + output=execute_command_args["reason"], + additional_output=additional_output, + ) + await agent.save_state() + return step + else: + assert user_input + execute_result = await agent.execute( + command_name="human_feedback", # HACK + command_args={}, + user_input=user_input, + ) + + # Propose next action + try: + next_command, next_command_args, raw_output = await agent.propose_action() + logger.debug(f"AI output: {raw_output}") + except Exception as e: + step = await self.db.update_step( + task_id=task_id, + step_id=step.step_id, + status="completed", + output=f"An error occurred while proposing the 
next action: {e}", + ) + return step + + # Format step output + output = ( + ( + f"`{execute_command}({fmt_kwargs(execute_command_args)})` returned:" + + ("\n\n" if "\n" in str(execute_result) else " ") + + f"{execute_result}\n\n" + ) + if execute_command_args and execute_command != ask_user.__name__ + else "" + ) + output += f"{raw_output['thoughts']['speak']}\n\n" + output += ( + f"Next Command: {next_command}({fmt_kwargs(next_command_args)})" + if next_command != ask_user.__name__ + else next_command_args["question"] + ) + + additional_output = { + **( + { + "last_action": { + "name": execute_command, + "args": execute_command_args, + "result": ( + "" + if execute_result is None + else ( + orjson.loads(execute_result.json()) + if not isinstance(execute_result, ActionErrorResult) + else { + "error": str(execute_result.error), + "reason": execute_result.reason, + } + ) + ), + }, + } + if not is_init_step + else {} + ), + **raw_output, + } + + task_cumulative_cost = agent.llm_provider.get_incurred_cost() + if task_cumulative_cost > 0: + additional_output["task_cumulative_cost"] = task_cumulative_cost + logger.debug( + f"Running total LLM cost for task {task_id}: " + f"${round(task_cumulative_cost, 3)}" + ) + + step = await self.db.update_step( + task_id=task_id, + step_id=step.step_id, + status="completed", + output=output, + additional_output=additional_output, + ) + + await agent.save_state() + return step + + async def _on_agent_write_file( + self, task: Task, step: Step, relative_path: pathlib.Path + ) -> None: + """ + Creates an Artifact for the written file, or updates the Artifact if it exists. 
+ """ + if relative_path.is_absolute(): + raise ValueError(f"File path '{relative_path}' is not relative") + for a in task.artifacts or []: + if a.relative_path == str(relative_path): + logger.debug(f"Updating Artifact after writing to existing file: {a}") + if not a.agent_created: + await self.db.update_artifact(a.artifact_id, agent_created=True) + break + else: + logger.debug(f"Creating Artifact for new file '{relative_path}'") + await self.db.create_artifact( + task_id=step.task_id, + step_id=step.step_id, + file_name=relative_path.parts[-1], + agent_created=True, + relative_path=str(relative_path), + ) + + async def get_step(self, task_id: str, step_id: str) -> Step: + """ + Get a step by ID. + """ + step = await self.db.get_step(task_id, step_id) + return step + + async def list_artifacts( + self, task_id: str, page: int = 1, pageSize: int = 10 + ) -> TaskArtifactsListResponse: + """ + List the artifacts that the task has created. + """ + artifacts, pagination = await self.db.list_artifacts(task_id, page, pageSize) + return TaskArtifactsListResponse(artifacts=artifacts, pagination=pagination) + + async def create_artifact( + self, task_id: str, file: UploadFile, relative_path: str + ) -> Artifact: + """ + Create an artifact for the task. + """ + file_name = file.filename or str(uuid4()) + data = b"" + while contents := file.file.read(1024 * 1024): + data += contents + # Check if relative path ends with filename + if relative_path.endswith(file_name): + file_path = relative_path + else: + file_path = os.path.join(relative_path, file_name) + + workspace = self._get_task_agent_file_workspace(task_id) + await workspace.write_file(file_path, data) + + artifact = await self.db.create_artifact( + task_id=task_id, + file_name=file_name, + relative_path=relative_path, + agent_created=False, + ) + return artifact + + async def get_artifact(self, task_id: str, artifact_id: str) -> StreamingResponse: + """ + Download a task artifact by ID. 
+ """ + try: + workspace = self._get_task_agent_file_workspace(task_id) + artifact = await self.db.get_artifact(artifact_id) + if artifact.file_name not in artifact.relative_path: + file_path = os.path.join(artifact.relative_path, artifact.file_name) + else: + file_path = artifact.relative_path + retrieved_artifact = workspace.read_file(file_path, binary=True) + except NotFoundError: + raise + except FileNotFoundError: + raise + + return StreamingResponse( + BytesIO(retrieved_artifact), + media_type="application/octet-stream", + headers={ + "Content-Disposition": f'attachment; filename="{artifact.file_name}"' + }, + ) + + def _get_task_agent_file_workspace(self, task_id: str | int) -> FileStorage: + agent_id = task_agent_id(task_id) + return self.file_storage.clone_with_subroot(f"agents/{agent_id}/workspace") + + def _get_task_llm_provider( + self, task: Task, step_id: str = "" + ) -> ChatModelProvider: + """ + Configures the LLM provider with headers to link outgoing requests to the task. 
+ """ + task_llm_budget = self._task_budgets[task.task_id] + + task_llm_provider_config = self.llm_provider._configuration.copy(deep=True) + _extra_request_headers = task_llm_provider_config.extra_request_headers + _extra_request_headers["AP-TaskID"] = task.task_id + if step_id: + _extra_request_headers["AP-StepID"] = step_id + if task.additional_input and (user_id := task.additional_input.get("user_id")): + _extra_request_headers["AutoGPT-UserID"] = user_id + + task_llm_provider = None + if isinstance(self.llm_provider, OpenAIProvider): + settings = self.llm_provider._settings.copy() + settings.budget = task_llm_budget + settings.configuration = task_llm_provider_config # type: ignore + task_llm_provider = OpenAIProvider( + settings=settings, + logger=logger.getChild(f"Task-{task.task_id}_OpenAIProvider"), + ) + + if task_llm_provider and task_llm_provider._budget: + self._task_budgets[task.task_id] = task_llm_provider._budget + + return task_llm_provider or self.llm_provider + + +def task_agent_id(task_id: str | int) -> str: + return f"AutoGPT-{task_id}" diff --git a/autogpts/autogpt/autogpt/app/cli.py b/autogpts/autogpt/autogpt/app/cli.py new file mode 100644 index 000000000000..e6ca0a783873 --- /dev/null +++ b/autogpts/autogpt/autogpt/app/cli.py @@ -0,0 +1,287 @@ +"""Main script for the autogpt package.""" +from logging import _nameToLevel as logLevelMap +from pathlib import Path +from typing import Optional + +import click + +from autogpt.logs.config import LogFormatName + +from .telemetry import setup_telemetry + + +@click.group(invoke_without_command=True) +@click.pass_context +def cli(ctx: click.Context): + setup_telemetry() + + # Invoke `run` by default + if ctx.invoked_subcommand is None: + ctx.invoke(run) + + +@cli.command() +@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode") +@click.option( + "-l", + "--continuous-limit", + type=int, + help="Defines the number of times to run in continuous mode", +) 
+@click.option("--speak", is_flag=True, help="Enable Speak Mode") +@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode") +@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode") +@click.option( + "-b", + "--browser-name", + help="Specifies which web-browser to use when using selenium to scrape the web.", +) +@click.option( + "--allow-downloads", + is_flag=True, + help="Dangerous: Allows AutoGPT to download files natively.", +) +@click.option( + # TODO: this is a hidden option for now, necessary for integration testing. + # We should make this public once we're ready to roll out agent specific workspaces. + "--workspace-directory", + "-w", + type=click.Path(file_okay=False), + hidden=True, +) +@click.option( + "--install-plugin-deps", + is_flag=True, + help="Installs external dependencies for 3rd party plugins.", +) +@click.option( + "--skip-news", + is_flag=True, + help="Specifies whether to suppress the output of latest news on startup.", +) +@click.option( + "--skip-reprompt", + "-y", + is_flag=True, + help="Skips the re-prompting messages at the beginning of the script", +) +@click.option( + "--ai-settings", + "-C", + type=click.Path(exists=True, dir_okay=False, path_type=Path), + help=( + "Specifies which ai_settings.yaml file to use, relative to the AutoGPT" + " root directory. Will also automatically skip the re-prompt." 
+ ), +) +@click.option( + "--ai-name", + type=str, + help="AI name override", +) +@click.option( + "--ai-role", + type=str, + help="AI role override", +) +@click.option( + "--prompt-settings", + "-P", + type=click.Path(exists=True, dir_okay=False, path_type=Path), + help="Specifies which prompt_settings.yaml file to use.", +) +@click.option( + "--constraint", + type=str, + multiple=True, + help=( + "Add or override AI constraints to include in the prompt;" + " may be used multiple times to pass multiple constraints" + ), +) +@click.option( + "--resource", + type=str, + multiple=True, + help=( + "Add or override AI resources to include in the prompt;" + " may be used multiple times to pass multiple resources" + ), +) +@click.option( + "--best-practice", + type=str, + multiple=True, + help=( + "Add or override AI best practices to include in the prompt;" + " may be used multiple times to pass multiple best practices" + ), +) +@click.option( + "--override-directives", + is_flag=True, + help=( + "If specified, --constraint, --resource and --best-practice will override" + " the AI's directives instead of being appended to them" + ), +) +@click.option( + "--debug", is_flag=True, help="Implies --log-level=DEBUG --log-format=debug" +) +@click.option("--log-level", type=click.Choice([*logLevelMap.keys()])) +@click.option( + "--log-format", + help=( + "Choose a log format; defaults to 'simple'." + " Also implies --log-file-format, unless it is specified explicitly." + " Using the 'structured_google_cloud' format disables log file output." + ), + type=click.Choice([i.value for i in LogFormatName]), +) +@click.option( + "--log-file-format", + help=( + "Override the format used for the log file output." + " Defaults to the application's global --log-format." 
+ ), + type=click.Choice([i.value for i in LogFormatName]), +) +def run( + continuous: bool, + continuous_limit: Optional[int], + speak: bool, + gpt3only: bool, + gpt4only: bool, + browser_name: Optional[str], + allow_downloads: bool, + workspace_directory: Optional[Path], + install_plugin_deps: bool, + skip_news: bool, + skip_reprompt: bool, + ai_settings: Optional[Path], + ai_name: Optional[str], + ai_role: Optional[str], + prompt_settings: Optional[Path], + resource: tuple[str], + constraint: tuple[str], + best_practice: tuple[str], + override_directives: bool, + debug: bool, + log_level: Optional[str], + log_format: Optional[str], + log_file_format: Optional[str], +) -> None: + """ + Sets up and runs an agent, based on the task specified by the user, or resumes an + existing agent. + """ + # Put imports inside function to avoid importing everything when starting the CLI + from autogpt.app.main import run_auto_gpt + + run_auto_gpt( + continuous=continuous, + continuous_limit=continuous_limit, + ai_settings=ai_settings, + prompt_settings=prompt_settings, + skip_reprompt=skip_reprompt, + speak=speak, + debug=debug, + log_level=log_level, + log_format=log_format, + log_file_format=log_file_format, + gpt3only=gpt3only, + gpt4only=gpt4only, + browser_name=browser_name, + allow_downloads=allow_downloads, + skip_news=skip_news, + workspace_directory=workspace_directory, + install_plugin_deps=install_plugin_deps, + override_ai_name=ai_name, + override_ai_role=ai_role, + resources=list(resource), + constraints=list(constraint), + best_practices=list(best_practice), + override_directives=override_directives, + ) + + +@cli.command() +@click.option( + "--prompt-settings", + "-P", + type=click.Path(exists=True, dir_okay=False, path_type=Path), + help="Specifies which prompt_settings.yaml file to use.", +) +@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode") +@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode") +@click.option( + "-b", 
+ "--browser-name", + help="Specifies which web-browser to use when using selenium to scrape the web.", +) +@click.option( + "--allow-downloads", + is_flag=True, + help="Dangerous: Allows AutoGPT to download files natively.", +) +@click.option( + "--install-plugin-deps", + is_flag=True, + help="Installs external dependencies for 3rd party plugins.", +) +@click.option( + "--debug", is_flag=True, help="Implies --log-level=DEBUG --log-format=debug" +) +@click.option("--log-level", type=click.Choice([*logLevelMap.keys()])) +@click.option( + "--log-format", + help=( + "Choose a log format; defaults to 'simple'." + " Also implies --log-file-format, unless it is specified explicitly." + " Using the 'structured_google_cloud' format disables log file output." + ), + type=click.Choice([i.value for i in LogFormatName]), +) +@click.option( + "--log-file-format", + help=( + "Override the format used for the log file output." + " Defaults to the application's global --log-format." + ), + type=click.Choice([i.value for i in LogFormatName]), +) +def serve( + prompt_settings: Optional[Path], + gpt3only: bool, + gpt4only: bool, + browser_name: Optional[str], + allow_downloads: bool, + install_plugin_deps: bool, + debug: bool, + log_level: Optional[str], + log_format: Optional[str], + log_file_format: Optional[str], +) -> None: + """ + Starts an Agent Protocol compliant AutoGPT server, which creates a custom agent for + every task. 
+ """ + # Put imports inside function to avoid importing everything when starting the CLI + from autogpt.app.main import run_auto_gpt_server + + run_auto_gpt_server( + prompt_settings=prompt_settings, + debug=debug, + log_level=log_level, + log_format=log_format, + log_file_format=log_file_format, + gpt3only=gpt3only, + gpt4only=gpt4only, + browser_name=browser_name, + allow_downloads=allow_downloads, + install_plugin_deps=install_plugin_deps, + ) + + +if __name__ == "__main__": + cli() diff --git a/autogpts/autogpt/autogpt/app/configurator.py b/autogpts/autogpt/autogpt/app/configurator.py new file mode 100644 index 000000000000..06f976c8345d --- /dev/null +++ b/autogpts/autogpt/autogpt/app/configurator.py @@ -0,0 +1,165 @@ +"""Configurator module.""" +from __future__ import annotations + +import logging +from pathlib import Path +from typing import Literal, Optional + +import click +from colorama import Back, Fore, Style + +from autogpt import utils +from autogpt.config import Config +from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL +from autogpt.core.resource.model_providers.openai import OpenAIModelName, OpenAIProvider +from autogpt.logs.helpers import request_user_double_check +from autogpt.memory.vector import get_supported_memory_backends + +logger = logging.getLogger(__name__) + + +async def apply_overrides_to_config( + config: Config, + continuous: bool = False, + continuous_limit: Optional[int] = None, + ai_settings_file: Optional[Path] = None, + prompt_settings_file: Optional[Path] = None, + skip_reprompt: bool = False, + gpt3only: bool = False, + gpt4only: bool = False, + memory_type: Optional[str] = None, + browser_name: Optional[str] = None, + allow_downloads: bool = False, + skip_news: bool = False, +) -> None: + """Updates the config object with the given arguments. + + Args: + config (Config): The config object to update. + continuous (bool): Whether to run in continuous mode. 
+ continuous_limit (int): The number of times to run in continuous mode. + ai_settings_file (Path): The path to the ai_settings.yaml file. + prompt_settings_file (Path): The path to the prompt_settings.yaml file. + skip_reprompt (bool): Whether to skip the re-prompting messages on start. + speak (bool): Whether to enable speak mode. + debug (bool): Whether to enable debug mode. + log_level (int): The global log level for the application. + log_format (str): The format for the log(s). + log_file_format (str): Override the format for the log file. + gpt3only (bool): Whether to enable GPT3.5 only mode. + gpt4only (bool): Whether to enable GPT4 only mode. + memory_type (str): The type of memory backend to use. + browser_name (str): The name of the browser to use for scraping the web. + allow_downloads (bool): Whether to allow AutoGPT to download files natively. + skips_news (bool): Whether to suppress the output of latest news on startup. + """ + config.continuous_mode = False + + if continuous: + logger.warning( + "Continuous mode is not recommended. It is potentially dangerous and may" + " cause your AI to run forever or carry out actions you would not usually" + " authorise. 
Use at your own risk.", + ) + config.continuous_mode = True + + if continuous_limit: + config.continuous_limit = continuous_limit + + # Check if continuous limit is used without continuous mode + if continuous_limit and not continuous: + raise click.UsageError("--continuous-limit can only be used with --continuous") + + # Set the default LLM models + if gpt3only: + # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM config + config.fast_llm = GPT_3_MODEL + config.smart_llm = GPT_3_MODEL + elif ( + gpt4only + and (await check_model(GPT_4_MODEL, model_type="smart_llm")) == GPT_4_MODEL + ): + # --gpt4only should always use gpt-4, despite user's SMART_LLM config + config.fast_llm = GPT_4_MODEL + config.smart_llm = GPT_4_MODEL + else: + config.fast_llm = await check_model(config.fast_llm, "fast_llm") + config.smart_llm = await check_model(config.smart_llm, "smart_llm") + + if memory_type: + supported_memory = get_supported_memory_backends() + chosen = memory_type + if chosen not in supported_memory: + logger.warning( + extra={ + "title": "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED:", + "title_color": Fore.RED, + }, + msg=f"{supported_memory}", + ) + else: + config.memory_backend = chosen + + if skip_reprompt: + config.skip_reprompt = True + + if ai_settings_file: + file = ai_settings_file + + # Validate file + (validated, message) = utils.validate_yaml_file(file) + if not validated: + logger.fatal(extra={"title": "FAILED FILE VALIDATION:"}, msg=message) + request_user_double_check() + exit(1) + + config.ai_settings_file = config.project_root / file + config.skip_reprompt = True + + if prompt_settings_file: + file = prompt_settings_file + + # Validate file + (validated, message) = utils.validate_yaml_file(file) + if not validated: + logger.fatal(extra={"title": "FAILED FILE VALIDATION:"}, msg=message) + request_user_double_check() + exit(1) + + config.prompt_settings_file = config.project_root / file + + if browser_name: + 
config.selenium_web_browser = browser_name + + if allow_downloads: + logger.warning( + msg=f"{Back.LIGHTYELLOW_EX}" + "AutoGPT will now be able to download and save files to your machine." + f"{Back.RESET}" + " It is recommended that you monitor any files it downloads carefully.", + ) + logger.warning( + msg=f"{Back.RED + Style.BRIGHT}" + "NEVER OPEN FILES YOU AREN'T SURE OF!" + f"{Style.RESET_ALL}", + ) + config.allow_downloads = True + + if skip_news: + config.skip_news = True + + +async def check_model( + model_name: OpenAIModelName, model_type: Literal["smart_llm", "fast_llm"] +) -> OpenAIModelName: + """Check if model is available for use. If not, return gpt-3.5-turbo.""" + openai = OpenAIProvider() + models = await openai.get_available_models() + + if any(model_name == m.name for m in models): + return model_name + + logger.warning( + f"You don't have access to {model_name}. Setting {model_type} to {GPT_3_MODEL}." + ) + return GPT_3_MODEL diff --git a/autogpts/autogpt/autogpt/app/main.py b/autogpts/autogpt/autogpt/app/main.py new file mode 100644 index 000000000000..7c8a069a3975 --- /dev/null +++ b/autogpts/autogpt/autogpt/app/main.py @@ -0,0 +1,810 @@ +""" +The application entry point. Can be invoked by a CLI or any other front end application. 
+""" + +import enum +import logging +import math +import os +import re +import signal +import sys +from pathlib import Path +from types import FrameType +from typing import TYPE_CHECKING, Optional + +from colorama import Fore, Style +from forge.sdk.db import AgentDB + +if TYPE_CHECKING: + from autogpt.agents.agent import Agent + +from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent +from autogpt.agent_factory.profile_generator import generate_agent_profile_for_task +from autogpt.agent_manager import AgentManager +from autogpt.agents import AgentThoughts, CommandArgs, CommandName +from autogpt.agents.utils.exceptions import AgentTerminated, InvalidAgentResponseError +from autogpt.commands.execute_code import ( + is_docker_available, + we_are_running_in_a_docker_container, +) +from autogpt.commands.system import finish +from autogpt.config import ( + AIDirectives, + AIProfile, + Config, + ConfigBuilder, + assert_config_has_openai_api_key, +) +from autogpt.core.resource.model_providers.openai import OpenAIProvider +from autogpt.core.runner.client_lib.utils import coroutine +from autogpt.file_storage import FileStorageBackendName, get_storage +from autogpt.logs.config import configure_chat_plugins, configure_logging +from autogpt.logs.helpers import print_attribute, speak +from autogpt.models.action_history import ActionInterruptedByHuman +from autogpt.plugins import scan_plugins +from scripts.install_plugin_deps import install_plugin_dependencies + +from .configurator import apply_overrides_to_config +from .setup import apply_overrides_to_ai_settings, interactively_revise_ai_settings +from .spinner import Spinner +from .utils import ( + clean_input, + get_legal_warning, + markdown_to_ansi_style, + print_git_branch_info, + print_motd, + print_python_version_info, +) + + +@coroutine +async def run_auto_gpt( + continuous: bool = False, + continuous_limit: Optional[int] = None, + ai_settings: Optional[Path] = None, + prompt_settings: 
Optional[Path] = None, + skip_reprompt: bool = False, + speak: bool = False, + debug: bool = False, + log_level: Optional[str] = None, + log_format: Optional[str] = None, + log_file_format: Optional[str] = None, + gpt3only: bool = False, + gpt4only: bool = False, + browser_name: Optional[str] = None, + allow_downloads: bool = False, + skip_news: bool = False, + workspace_directory: Optional[Path] = None, + install_plugin_deps: bool = False, + override_ai_name: Optional[str] = None, + override_ai_role: Optional[str] = None, + resources: Optional[list[str]] = None, + constraints: Optional[list[str]] = None, + best_practices: Optional[list[str]] = None, + override_directives: bool = False, +): + # Set up configuration + config = ConfigBuilder.build_config_from_env() + # Storage + local = config.file_storage_backend == FileStorageBackendName.LOCAL + restrict_to_root = not local or config.restrict_to_workspace + file_storage = get_storage( + config.file_storage_backend, root_path="data", restrict_to_root=restrict_to_root + ) + file_storage.initialize() + + # Set up logging module + if speak: + config.tts_config.speak_mode = True + configure_logging( + debug=debug, + level=log_level, + log_format=log_format, + log_file_format=log_file_format, + config=config.logging, + tts_config=config.tts_config, + ) + + # TODO: fill in llm values here + assert_config_has_openai_api_key(config) + + await apply_overrides_to_config( + config=config, + continuous=continuous, + continuous_limit=continuous_limit, + ai_settings_file=ai_settings, + prompt_settings_file=prompt_settings, + skip_reprompt=skip_reprompt, + gpt3only=gpt3only, + gpt4only=gpt4only, + browser_name=browser_name, + allow_downloads=allow_downloads, + skip_news=skip_news, + ) + + llm_provider = _configure_openai_provider(config) + + logger = logging.getLogger(__name__) + + if config.continuous_mode: + for line in get_legal_warning().split("\n"): + logger.warning( + extra={ + "title": "LEGAL:", + "title_color": Fore.RED, + 
"preserve_color": True, + }, + msg=markdown_to_ansi_style(line), + ) + + if not config.skip_news: + print_motd(config, logger) + print_git_branch_info(logger) + print_python_version_info(logger) + print_attribute("Smart LLM", config.smart_llm) + print_attribute("Fast LLM", config.fast_llm) + print_attribute("Browser", config.selenium_web_browser) + if config.continuous_mode: + print_attribute("Continuous Mode", "ENABLED", title_color=Fore.YELLOW) + if continuous_limit: + print_attribute("Continuous Limit", config.continuous_limit) + if config.tts_config.speak_mode: + print_attribute("Speak Mode", "ENABLED") + if ai_settings: + print_attribute("Using AI Settings File", ai_settings) + if prompt_settings: + print_attribute("Using Prompt Settings File", prompt_settings) + if config.allow_downloads: + print_attribute("Native Downloading", "ENABLED") + if we_are_running_in_a_docker_container() or is_docker_available(): + print_attribute("Code Execution", "ENABLED") + else: + print_attribute( + "Code Execution", + "DISABLED (Docker unavailable)", + title_color=Fore.YELLOW, + ) + + if install_plugin_deps: + install_plugin_dependencies() + + config.plugins = scan_plugins(config) + configure_chat_plugins(config) + + # Let user choose an existing agent to run + agent_manager = AgentManager(file_storage) + existing_agents = agent_manager.list_agents() + load_existing_agent = "" + if existing_agents: + print( + "Existing agents\n---------------\n" + + "\n".join(f"{i} - {id}" for i, id in enumerate(existing_agents, 1)) + ) + load_existing_agent = clean_input( + config, + "Enter the number or name of the agent to run," + " or hit enter to create a new one:", + ) + if re.match(r"^\d+$", load_existing_agent.strip()) and 0 < int( + load_existing_agent + ) <= len(existing_agents): + load_existing_agent = existing_agents[int(load_existing_agent) - 1] + + if load_existing_agent not in existing_agents: + logger.info( + f"Unknown agent '{load_existing_agent}', " + f"creating a new one 
instead.", + extra={"color": Fore.YELLOW}, + ) + load_existing_agent = "" + + # Either load existing or set up new agent state + agent = None + agent_state = None + + ############################ + # Resume an Existing Agent # + ############################ + if load_existing_agent: + agent_state = None + while True: + answer = clean_input(config, "Resume? [Y/n]") + if answer == "" or answer.lower() == "y": + agent_state = agent_manager.load_agent_state(load_existing_agent) + break + elif answer.lower() == "n": + break + + if agent_state: + agent = configure_agent_with_state( + state=agent_state, + app_config=config, + file_storage=file_storage, + llm_provider=llm_provider, + ) + apply_overrides_to_ai_settings( + ai_profile=agent.state.ai_profile, + directives=agent.state.directives, + override_name=override_ai_name, + override_role=override_ai_role, + resources=resources, + constraints=constraints, + best_practices=best_practices, + replace_directives=override_directives, + ) + + if ( + agent.event_history.current_episode + and agent.event_history.current_episode.action.name == finish.__name__ + and not agent.event_history.current_episode.result + ): + # Agent was resumed after `finish` -> rewrite result of `finish` action + finish_reason = agent.event_history.current_episode.action.args["reason"] + print(f"Agent previously self-terminated; reason: '{finish_reason}'") + new_assignment = clean_input( + config, "Please give a follow-up question or assignment:" + ) + agent.event_history.register_result( + ActionInterruptedByHuman(feedback=new_assignment) + ) + + # If any of these are specified as arguments, + # assume the user doesn't want to revise them + if not any( + [ + override_ai_name, + override_ai_role, + resources, + constraints, + best_practices, + ] + ): + ai_profile, ai_directives = await interactively_revise_ai_settings( + ai_profile=agent.state.ai_profile, + directives=agent.state.directives, + app_config=config, + ) + else: + logger.info("AI config 
overrides specified through CLI; skipping revision") + + ###################### + # Set up a new Agent # + ###################### + if not agent: + task = "" + while task.strip() == "": + task = clean_input( + config, + "Enter the task that you want AutoGPT to execute," + " with as much detail as possible:", + ) + + base_ai_directives = AIDirectives.from_file(config.prompt_settings_file) + + ai_profile, task_oriented_ai_directives = await generate_agent_profile_for_task( + task, + app_config=config, + llm_provider=llm_provider, + ) + ai_directives = base_ai_directives + task_oriented_ai_directives + apply_overrides_to_ai_settings( + ai_profile=ai_profile, + directives=ai_directives, + override_name=override_ai_name, + override_role=override_ai_role, + resources=resources, + constraints=constraints, + best_practices=best_practices, + replace_directives=override_directives, + ) + + # If any of these are specified as arguments, + # assume the user doesn't want to revise them + if not any( + [ + override_ai_name, + override_ai_role, + resources, + constraints, + best_practices, + ] + ): + ai_profile, ai_directives = await interactively_revise_ai_settings( + ai_profile=ai_profile, + directives=ai_directives, + app_config=config, + ) + else: + logger.info("AI config overrides specified through CLI; skipping revision") + + agent = create_agent( + agent_id=agent_manager.generate_id(ai_profile.ai_name), + task=task, + ai_profile=ai_profile, + directives=ai_directives, + app_config=config, + file_storage=file_storage, + llm_provider=llm_provider, + ) + + if not agent.config.allow_fs_access: + logger.info( + f"{Fore.YELLOW}" + "NOTE: All files/directories created by this agent can be found " + f"inside its workspace at:{Fore.RESET} {agent.workspace.root}", + extra={"preserve_color": True}, + ) + + ################# + # Run the Agent # + ################# + try: + await run_interaction_loop(agent) + except AgentTerminated: + agent_id = agent.state.agent_id + 
logger.info(f"Saving state of {agent_id}...") + + # Allow user to Save As other ID + save_as_id = clean_input( + config, + f"Press enter to save as '{agent_id}'," + " or enter a different ID to save to:", + ) + # TODO: allow many-to-one relations of agents and workspaces + await agent.save_state(save_as_id if not save_as_id.isspace() else None) + + +@coroutine +async def run_auto_gpt_server( + prompt_settings: Optional[Path] = None, + debug: bool = False, + log_level: Optional[str] = None, + log_format: Optional[str] = None, + log_file_format: Optional[str] = None, + gpt3only: bool = False, + gpt4only: bool = False, + browser_name: Optional[str] = None, + allow_downloads: bool = False, + install_plugin_deps: bool = False, +): + from .agent_protocol_server import AgentProtocolServer + + config = ConfigBuilder.build_config_from_env() + # Storage + local = config.file_storage_backend == FileStorageBackendName.LOCAL + restrict_to_root = not local or config.restrict_to_workspace + file_storage = get_storage( + config.file_storage_backend, root_path="data", restrict_to_root=restrict_to_root + ) + file_storage.initialize() + + # Set up logging module + configure_logging( + debug=debug, + level=log_level, + log_format=log_format, + log_file_format=log_file_format, + config=config.logging, + tts_config=config.tts_config, + ) + + # TODO: fill in llm values here + assert_config_has_openai_api_key(config) + + await apply_overrides_to_config( + config=config, + prompt_settings_file=prompt_settings, + gpt3only=gpt3only, + gpt4only=gpt4only, + browser_name=browser_name, + allow_downloads=allow_downloads, + ) + + llm_provider = _configure_openai_provider(config) + + if install_plugin_deps: + install_plugin_dependencies() + + config.plugins = scan_plugins(config) + + # Set up & start server + database = AgentDB( + database_string=os.getenv("AP_SERVER_DB_URL", "sqlite:///data/ap_server.db"), + debug_enabled=debug, + ) + port: int = int(os.getenv("AP_SERVER_PORT", default=8000)) + 
server = AgentProtocolServer( + app_config=config, + database=database, + file_storage=file_storage, + llm_provider=llm_provider, + ) + await server.start(port=port) + + logging.getLogger().info( + f"Total OpenAI session cost: " + f"${round(sum(b.total_cost for b in server._task_budgets.values()), 2)}" + ) + + +def _configure_openai_provider(config: Config) -> OpenAIProvider: + """Create a configured OpenAIProvider object. + + Args: + config: The program's configuration. + + Returns: + A configured OpenAIProvider object. + """ + if config.openai_credentials is None: + raise RuntimeError("OpenAI key is not configured") + + openai_settings = OpenAIProvider.default_settings.copy(deep=True) + openai_settings.credentials = config.openai_credentials + return OpenAIProvider( + settings=openai_settings, + logger=logging.getLogger("OpenAIProvider"), + ) + + +def _get_cycle_budget(continuous_mode: bool, continuous_limit: int) -> int | float: + # Translate from the continuous_mode/continuous_limit config + # to a cycle_budget (maximum number of cycles to run without checking in with the + # user) and a count of cycles_remaining before we check in.. + if continuous_mode: + cycle_budget = continuous_limit if continuous_limit else math.inf + else: + cycle_budget = 1 + + return cycle_budget + + +class UserFeedback(str, enum.Enum): + """Enum for user feedback.""" + + AUTHORIZE = "GENERATE NEXT COMMAND JSON" + EXIT = "EXIT" + TEXT = "TEXT" + + +async def run_interaction_loop( + agent: "Agent", +) -> None: + """Run the main interaction loop for the agent. + + Args: + agent: The agent to run the interaction loop for. + + Returns: + None + """ + # These contain both application config and agent config, so grab them here. 
+ legacy_config = agent.legacy_config + ai_profile = agent.ai_profile + logger = logging.getLogger(__name__) + + cycle_budget = cycles_remaining = _get_cycle_budget( + legacy_config.continuous_mode, legacy_config.continuous_limit + ) + spinner = Spinner( + "Thinking...", plain_output=legacy_config.logging.plain_console_output + ) + stop_reason = None + + def graceful_agent_interrupt(signum: int, frame: Optional[FrameType]) -> None: + nonlocal cycle_budget, cycles_remaining, spinner, stop_reason + if stop_reason: + logger.error("Quitting immediately...") + sys.exit() + if cycles_remaining in [0, 1]: + logger.warning("Interrupt signal received: shutting down gracefully.") + logger.warning( + "Press Ctrl+C again if you want to stop AutoGPT immediately." + ) + stop_reason = AgentTerminated("Interrupt signal received") + else: + restart_spinner = spinner.running + if spinner.running: + spinner.stop() + + logger.error( + "Interrupt signal received: stopping continuous command execution." + ) + cycles_remaining = 1 + if restart_spinner: + spinner.start() + + def handle_stop_signal() -> None: + if stop_reason: + raise stop_reason + + # Set up an interrupt signal for the agent. + signal.signal(signal.SIGINT, graceful_agent_interrupt) + + ######################### + # Application Main Loop # + ######################### + + # Keep track of consecutive failures of the agent + consecutive_failures = 0 + + while cycles_remaining > 0: + logger.debug(f"Cycle budget: {cycle_budget}; remaining: {cycles_remaining}") + + ######## + # Plan # + ######## + handle_stop_signal() + # Have the agent determine the next action to take. 
+ with spinner: + try: + ( + command_name, + command_args, + assistant_reply_dict, + ) = await agent.propose_action() + except InvalidAgentResponseError as e: + logger.warning(f"The agent's thoughts could not be parsed: {e}") + consecutive_failures += 1 + if consecutive_failures >= 3: + logger.error( + "The agent failed to output valid thoughts" + f" {consecutive_failures} times in a row. Terminating..." + ) + raise AgentTerminated( + "The agent failed to output valid thoughts" + f" {consecutive_failures} times in a row." + ) + continue + + consecutive_failures = 0 + + ############### + # Update User # + ############### + # Print the assistant's thoughts and the next command to the user. + update_user( + ai_profile, + command_name, + command_args, + assistant_reply_dict, + speak_mode=legacy_config.tts_config.speak_mode, + ) + + ################## + # Get user input # + ################## + handle_stop_signal() + if cycles_remaining == 1: # Last cycle + user_feedback, user_input, new_cycles_remaining = await get_user_feedback( + legacy_config, + ai_profile, + ) + + if user_feedback == UserFeedback.AUTHORIZE: + if new_cycles_remaining is not None: + # Case 1: User is altering the cycle budget. 
+ if cycle_budget > 1: + cycle_budget = new_cycles_remaining + 1 + # Case 2: User is running iteratively and + # has initiated a one-time continuous cycle + cycles_remaining = new_cycles_remaining + 1 + else: + # Case 1: Continuous iteration was interrupted -> resume + if cycle_budget > 1: + logger.info( + f"The cycle budget is {cycle_budget}.", + extra={ + "title": "RESUMING CONTINUOUS EXECUTION", + "title_color": Fore.MAGENTA, + }, + ) + # Case 2: The agent used up its cycle budget -> reset + cycles_remaining = cycle_budget + 1 + logger.info( + "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", + extra={"color": Fore.MAGENTA}, + ) + elif user_feedback == UserFeedback.EXIT: + logger.warning("Exiting...") + exit() + else: # user_feedback == UserFeedback.TEXT + command_name = "human_feedback" + else: + user_input = "" + # First log new-line so user can differentiate sections better in console + print() + if cycles_remaining != math.inf: + # Print authorized commands left value + print_attribute( + "AUTHORIZED_COMMANDS_LEFT", cycles_remaining, title_color=Fore.CYAN + ) + + ################### + # Execute Command # + ################### + # Decrement the cycle counter first to reduce the likelihood of a SIGINT + # happening during command execution, setting the cycles remaining to 1, + # and then having the decrement set it to 0, exiting the application. 
+ if command_name != "human_feedback": + cycles_remaining -= 1 + + if not command_name: + continue + + handle_stop_signal() + + if command_name: + result = await agent.execute(command_name, command_args, user_input) + + if result.status == "success": + logger.info( + result, extra={"title": "SYSTEM:", "title_color": Fore.YELLOW} + ) + elif result.status == "error": + logger.warning( + f"Command {command_name} returned an error: " + f"{result.error or result.reason}" + ) + + +def update_user( + ai_profile: AIProfile, + command_name: CommandName, + command_args: CommandArgs, + assistant_reply_dict: AgentThoughts, + speak_mode: bool = False, +) -> None: + """Prints the assistant's thoughts and the next command to the user. + + Args: + config: The program's configuration. + ai_profile: The AI's personality/profile + command_name: The name of the command to execute. + command_args: The arguments for the command. + assistant_reply_dict: The assistant's reply. + """ + logger = logging.getLogger(__name__) + + print_assistant_thoughts( + ai_name=ai_profile.ai_name, + assistant_reply_json_valid=assistant_reply_dict, + speak_mode=speak_mode, + ) + + if speak_mode: + speak(f"I want to execute {command_name}") + + # First log new-line so user can differentiate sections better in console + print() + logger.info( + f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} " + f"ARGUMENTS = {Fore.CYAN}{command_args}{Style.RESET_ALL}", + extra={ + "title": "NEXT ACTION:", + "title_color": Fore.CYAN, + "preserve_color": True, + }, + ) + + +async def get_user_feedback( + config: Config, + ai_profile: AIProfile, +) -> tuple[UserFeedback, str, int | None]: + """Gets the user's feedback on the assistant's reply. + + Args: + config: The program's configuration. + ai_profile: The AI's configuration. + + Returns: + A tuple of the user's feedback, the user's input, and the number of + cycles remaining if the user has initiated a continuous cycle. 
+ """ + logger = logging.getLogger(__name__) + + # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### + # Get key press: Prompt the user to press enter to continue or escape + # to exit + logger.info( + f"Enter '{config.authorise_key}' to authorise command, " + f"'{config.authorise_key} -N' to run N continuous commands, " + f"'{config.exit_key}' to exit program, or enter feedback for " + f"{ai_profile.ai_name}..." + ) + + user_feedback = None + user_input = "" + new_cycles_remaining = None + + while user_feedback is None: + # Get input from user + if config.chat_messages_enabled: + console_input = clean_input(config, "Waiting for your response...") + else: + console_input = clean_input( + config, Fore.MAGENTA + "Input:" + Style.RESET_ALL + ) + + # Parse user input + if console_input.lower().strip() == config.authorise_key: + user_feedback = UserFeedback.AUTHORIZE + elif console_input.lower().strip() == "": + logger.warning("Invalid input format.") + elif console_input.lower().startswith(f"{config.authorise_key} -"): + try: + user_feedback = UserFeedback.AUTHORIZE + new_cycles_remaining = abs(int(console_input.split(" ")[1])) + except ValueError: + logger.warning( + f"Invalid input format. " + f"Please enter '{config.authorise_key} -N'" + " where N is the number of continuous tasks." 
+ ) + elif console_input.lower() in [config.exit_key, "exit"]: + user_feedback = UserFeedback.EXIT + else: + user_feedback = UserFeedback.TEXT + user_input = console_input + + return user_feedback, user_input, new_cycles_remaining + + +def print_assistant_thoughts( + ai_name: str, + assistant_reply_json_valid: dict, + speak_mode: bool = False, +) -> None: + logger = logging.getLogger(__name__) + + assistant_thoughts_reasoning = None + assistant_thoughts_plan = None + assistant_thoughts_speak = None + assistant_thoughts_criticism = None + + assistant_thoughts = assistant_reply_json_valid.get("thoughts", {}) + assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text", "")) + if assistant_thoughts: + assistant_thoughts_reasoning = remove_ansi_escape( + assistant_thoughts.get("reasoning", "") + ) + assistant_thoughts_plan = remove_ansi_escape(assistant_thoughts.get("plan", "")) + assistant_thoughts_criticism = remove_ansi_escape( + assistant_thoughts.get("self_criticism", "") + ) + assistant_thoughts_speak = remove_ansi_escape( + assistant_thoughts.get("speak", "") + ) + print_attribute( + f"{ai_name.upper()} THOUGHTS", assistant_thoughts_text, title_color=Fore.YELLOW + ) + print_attribute("REASONING", assistant_thoughts_reasoning, title_color=Fore.YELLOW) + if assistant_thoughts_plan: + print_attribute("PLAN", "", title_color=Fore.YELLOW) + # If it's a list, join it into a string + if isinstance(assistant_thoughts_plan, list): + assistant_thoughts_plan = "\n".join(assistant_thoughts_plan) + elif isinstance(assistant_thoughts_plan, dict): + assistant_thoughts_plan = str(assistant_thoughts_plan) + + # Split the input_string using the newline character and dashes + lines = assistant_thoughts_plan.split("\n") + for line in lines: + line = line.lstrip("- ") + logger.info(line.strip(), extra={"title": "- ", "title_color": Fore.GREEN}) + print_attribute( + "CRITICISM", f"{assistant_thoughts_criticism}", title_color=Fore.YELLOW + ) + + # Speak the assistant's 
thoughts + if assistant_thoughts_speak: + if speak_mode: + speak(assistant_thoughts_speak) + else: + print_attribute("SPEAK", assistant_thoughts_speak, title_color=Fore.YELLOW) + + +def remove_ansi_escape(s: str) -> str: + return s.replace("\x1B", "") diff --git a/autogpts/autogpt/autogpt/app/setup.py b/autogpts/autogpt/autogpt/app/setup.py new file mode 100644 index 000000000000..94460e62f6f4 --- /dev/null +++ b/autogpts/autogpt/autogpt/app/setup.py @@ -0,0 +1,207 @@ +"""Set up the AI and its goals""" +import logging +from typing import Optional + +from autogpt.app.utils import clean_input +from autogpt.config import AIDirectives, AIProfile, Config +from autogpt.logs.helpers import print_attribute + +logger = logging.getLogger(__name__) + + +def apply_overrides_to_ai_settings( + ai_profile: AIProfile, + directives: AIDirectives, + override_name: Optional[str] = "", + override_role: Optional[str] = "", + replace_directives: bool = False, + resources: Optional[list[str]] = None, + constraints: Optional[list[str]] = None, + best_practices: Optional[list[str]] = None, +): + if override_name: + ai_profile.ai_name = override_name + if override_role: + ai_profile.ai_role = override_role + + if replace_directives: + if resources: + directives.resources = resources + if constraints: + directives.constraints = constraints + if best_practices: + directives.best_practices = best_practices + else: + if resources: + directives.resources += resources + if constraints: + directives.constraints += constraints + if best_practices: + directives.best_practices += best_practices + + +async def interactively_revise_ai_settings( + ai_profile: AIProfile, + directives: AIDirectives, + app_config: Config, +): + """Interactively revise the AI settings. + + Args: + ai_profile (AIConfig): The current AI profile. + ai_directives (AIDirectives): The current AI directives. + app_config (Config): The application configuration. + + Returns: + AIConfig: The revised AI settings. 
+ """ + logger = logging.getLogger("revise_ai_profile") + + revised = False + + while True: + # Print the current AI configuration + print_ai_settings( + title="Current AI Settings" if not revised else "Revised AI Settings", + ai_profile=ai_profile, + directives=directives, + logger=logger, + ) + + if ( + clean_input(app_config, "Continue with these settings? [Y/n]").lower() + or app_config.authorise_key + ) == app_config.authorise_key: + break + + # Ask for revised ai_profile + ai_profile.ai_name = ( + clean_input(app_config, "Enter AI name (or press enter to keep current):") + or ai_profile.ai_name + ) + ai_profile.ai_role = ( + clean_input( + app_config, "Enter new AI role (or press enter to keep current):" + ) + or ai_profile.ai_role + ) + + # Revise constraints + i = 0 + while i < len(directives.constraints): + constraint = directives.constraints[i] + print_attribute(f"Constraint {i+1}:", f'"{constraint}"') + new_constraint = ( + clean_input( + app_config, + f"Enter new constraint {i+1}" + " (press enter to keep current, or '-' to remove):", + ) + or constraint + ) + + if new_constraint == "-": + directives.constraints.remove(constraint) + continue + elif new_constraint: + directives.constraints[i] = new_constraint + + i += 1 + + # Add new constraints + while True: + new_constraint = clean_input( + app_config, + "Press enter to finish, or enter a constraint to add:", + ) + if not new_constraint: + break + directives.constraints.append(new_constraint) + + # Revise resources + i = 0 + while i < len(directives.resources): + resource = directives.resources[i] + print_attribute(f"Resource {i+1}:", f'"{resource}"') + new_resource = ( + clean_input( + app_config, + f"Enter new resource {i+1}" + " (press enter to keep current, or '-' to remove):", + ) + or resource + ) + if new_resource == "-": + directives.resources.remove(resource) + continue + elif new_resource: + directives.resources[i] = new_resource + + i += 1 + + # Add new resources + while True: + new_resource 
= clean_input( + app_config, + "Press enter to finish, or enter a resource to add:", + ) + if not new_resource: + break + directives.resources.append(new_resource) + + # Revise best practices + i = 0 + while i < len(directives.best_practices): + best_practice = directives.best_practices[i] + print_attribute(f"Best Practice {i+1}:", f'"{best_practice}"') + new_best_practice = ( + clean_input( + app_config, + f"Enter new best practice {i+1}" + " (press enter to keep current, or '-' to remove):", + ) + or best_practice + ) + if new_best_practice == "-": + directives.best_practices.remove(best_practice) + continue + elif new_best_practice: + directives.best_practices[i] = new_best_practice + + i += 1 + + # Add new best practices + while True: + new_best_practice = clean_input( + app_config, + "Press enter to finish, or add a best practice to add:", + ) + if not new_best_practice: + break + directives.best_practices.append(new_best_practice) + + revised = True + + return ai_profile, directives + + +def print_ai_settings( + ai_profile: AIProfile, + directives: AIDirectives, + logger: logging.Logger, + title: str = "AI Settings", +): + print_attribute(title, "") + print_attribute("-" * len(title), "") + print_attribute("Name :", ai_profile.ai_name) + print_attribute("Role :", ai_profile.ai_role) + + print_attribute("Constraints:", "" if directives.constraints else "(none)") + for constraint in directives.constraints: + logger.info(f"- {constraint}") + print_attribute("Resources:", "" if directives.resources else "(none)") + for resource in directives.resources: + logger.info(f"- {resource}") + print_attribute("Best practices:", "" if directives.best_practices else "(none)") + for best_practice in directives.best_practices: + logger.info(f"- {best_practice}") diff --git a/autogpts/autogpt/autogpt/app/spinner.py b/autogpts/autogpt/autogpt/app/spinner.py new file mode 100644 index 000000000000..8b2aa6c3cc6a --- /dev/null +++ b/autogpts/autogpt/autogpt/app/spinner.py @@ -0,0 
+1,70 @@ +"""A simple spinner module""" +import itertools +import sys +import threading +import time + + +class Spinner: + """A simple spinner class""" + + def __init__( + self, + message: str = "Loading...", + delay: float = 0.1, + plain_output: bool = False, + ) -> None: + """Initialize the spinner class + + Args: + message (str): The message to display. + delay (float): The delay between each spinner update. + plain_output (bool): Whether to display the spinner or not. + """ + self.plain_output = plain_output + self.spinner = itertools.cycle(["-", "/", "|", "\\"]) + self.delay = delay + self.message = message + self.running = False + self.spinner_thread = None + + def spin(self) -> None: + """Spin the spinner""" + if self.plain_output: + self.print_message() + return + while self.running: + self.print_message() + time.sleep(self.delay) + + def print_message(self): + sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") + sys.stdout.write(f"{next(self.spinner)} {self.message}\r") + sys.stdout.flush() + + def start(self): + self.running = True + self.spinner_thread = threading.Thread(target=self.spin) + self.spinner_thread.start() + + def stop(self): + self.running = False + if self.spinner_thread is not None: + self.spinner_thread.join() + sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") + sys.stdout.flush() + + def __enter__(self): + """Start the spinner""" + self.start() + return self + + def __exit__(self, exc_type, exc_value, exc_traceback) -> None: + """Stop the spinner + + Args: + exc_type (Exception): The exception type. + exc_value (Exception): The exception value. + exc_traceback (Exception): The exception traceback. 
+ """ + self.stop() diff --git a/autogpts/autogpt/autogpt/app/telemetry.py b/autogpts/autogpt/autogpt/app/telemetry.py new file mode 100644 index 000000000000..9706781d7ada --- /dev/null +++ b/autogpts/autogpt/autogpt/app/telemetry.py @@ -0,0 +1,64 @@ +import os + +import click +from colorama import Fore, Style + +from .utils import ( + env_file_exists, + get_git_user_email, + set_env_config_value, + vcs_state_diverges_from_master, +) + + +def setup_telemetry() -> None: + if os.getenv("TELEMETRY_OPT_IN") is None: + # If no .env file is present, don't bother asking to enable telemetry, + # to prevent repeated asking in non-persistent environments. + if not env_file_exists(): + return + + allow_telemetry = click.prompt( + f""" +{Style.BRIGHT}❓ Do you want to enable telemetry? ❓{Style.NORMAL} +This means AutoGPT will send diagnostic data to the core development team when something +goes wrong, and will help us to diagnose and fix problems earlier and faster. It also +allows us to collect basic performance data, which helps us find bottlenecks and other +things that slow down the application. + +By entering 'yes', you confirm that you have read and agree to our Privacy Policy, +which is available here: +https://www.notion.so/auto-gpt/Privacy-Policy-ab11c9c20dbd4de1a15dcffe84d77984 + +Please enter 'yes' or 'no'""", + type=bool, + ) + set_env_config_value("TELEMETRY_OPT_IN", "true" if allow_telemetry else "false") + click.echo( + f"❤️ Thank you! Telemetry is {Fore.GREEN}enabled{Fore.RESET}." + if allow_telemetry + else f"👍 Telemetry is {Fore.RED}disabled{Fore.RESET}." 
+ ) + click.echo( + "💡 If you ever change your mind, you can change 'TELEMETRY_OPT_IN' in .env" + ) + click.echo() + + if os.getenv("TELEMETRY_OPT_IN", "").lower() == "true": + _setup_sentry() + + +def _setup_sentry() -> None: + import sentry_sdk + + sentry_sdk.init( + dsn="https://dc266f2f7a2381194d1c0fa36dff67d8@o4505260022104064.ingest.sentry.io/4506739844710400", # noqa + enable_tracing=True, + environment=os.getenv( + "TELEMETRY_ENVIRONMENT", + "production" if not vcs_state_diverges_from_master() else "dev", + ), + ) + + # Allow Sentry to distinguish between users + sentry_sdk.set_user({"email": get_git_user_email(), "ip_address": "{{auto}}"}) diff --git a/autogpts/autogpt/autogpt/app/utils.py b/autogpts/autogpt/autogpt/app/utils.py new file mode 100644 index 000000000000..49a1e2a44fdf --- /dev/null +++ b/autogpts/autogpt/autogpt/app/utils.py @@ -0,0 +1,280 @@ +import contextlib +import logging +import os +import re +import socket +import sys +from pathlib import Path +from typing import TYPE_CHECKING + +import click +import requests +from colorama import Fore, Style +from git import InvalidGitRepositoryError, Repo + +if TYPE_CHECKING: + from autogpt.config import Config + +logger = logging.getLogger(__name__) + + +def clean_input(config: "Config", prompt: str = ""): + try: + if config.chat_messages_enabled: + for plugin in config.plugins: + if not hasattr(plugin, "can_handle_user_input"): + continue + if not plugin.can_handle_user_input(user_input=prompt): + continue + plugin_response = plugin.user_input(user_input=prompt) + if not plugin_response: + continue + if plugin_response.lower() in [ + "yes", + "yeah", + "y", + "ok", + "okay", + "sure", + "alright", + ]: + return config.authorise_key + elif plugin_response.lower() in [ + "no", + "nope", + "n", + "negative", + ]: + return config.exit_key + return plugin_response + + # ask for input, default when just pressing Enter is y + logger.debug("Asking user via keyboard...") + + return click.prompt( + 
text=prompt, prompt_suffix=" ", default="", show_default=False + ) + except KeyboardInterrupt: + logger.info("You interrupted AutoGPT") + logger.info("Quitting...") + exit(0) + + +def get_bulletin_from_web(): + try: + response = requests.get( + "https://raw.githubusercontent.com/Significant-Gravitas/AutoGPT/master/autogpts/autogpt/BULLETIN.md" # noqa: E501 + ) + if response.status_code == 200: + return response.text + except requests.exceptions.RequestException: + pass + + return "" + + +def get_current_git_branch() -> str: + try: + repo = Repo(search_parent_directories=True) + branch = repo.active_branch + return branch.name + except InvalidGitRepositoryError: + return "" + + +def vcs_state_diverges_from_master() -> bool: + """ + Returns whether a git repo is present and contains changes that are not in `master`. + """ + paths_we_care_about = "autogpts/autogpt/autogpt/**/*.py" + try: + repo = Repo(search_parent_directories=True) + + # Check for uncommitted changes in the specified path + uncommitted_changes = repo.index.diff(None, paths=paths_we_care_about) + if uncommitted_changes: + return True + + # Find OG AutoGPT remote + for remote in repo.remotes: + if remote.url.endswith( + tuple( + # All permutations of old/new repo name and HTTP(S)/Git URLs + f"{prefix}{path}" + for prefix in ("://github.com/", "git@github.com:") + for path in ( + f"Significant-Gravitas/{n}.git" for n in ("AutoGPT", "Auto-GPT") + ) + ) + ): + og_remote = remote + break + else: + # Original AutoGPT remote is not configured: assume local codebase diverges + return True + + master_branch = og_remote.refs.master + with contextlib.suppress(StopIteration): + next(repo.iter_commits(f"HEAD..{master_branch}", paths=paths_we_care_about)) + # Local repo is one or more commits ahead of OG AutoGPT master branch + return True + + # Relevant part of the codebase is on master + return False + except InvalidGitRepositoryError: + # No git repo present: assume codebase is a clean download + return False + 
+ +def get_git_user_email() -> str: + try: + repo = Repo(search_parent_directories=True) + return repo.config_reader().get_value("user", "email", default="") + except InvalidGitRepositoryError: + return "" + + +def get_latest_bulletin() -> tuple[str, bool]: + exists = os.path.exists("data/CURRENT_BULLETIN.md") + current_bulletin = "" + if exists: + current_bulletin = open( + "data/CURRENT_BULLETIN.md", "r", encoding="utf-8" + ).read() + new_bulletin = get_bulletin_from_web() + is_new_news = new_bulletin != "" and new_bulletin != current_bulletin + + news_header = Fore.YELLOW + "Welcome to AutoGPT!\n" + if new_bulletin or current_bulletin: + news_header += ( + "Below you'll find the latest AutoGPT News and feature updates!\n" + "If you don't wish to see this message, you " + "can run AutoGPT with the *--skip-news* flag.\n" + ) + + if new_bulletin and is_new_news: + open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin) + current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}" + + return f"{news_header}\n{current_bulletin}", is_new_news + + +def markdown_to_ansi_style(markdown: str): + ansi_lines: list[str] = [] + for line in markdown.split("\n"): + line_style = "" + + if line.startswith("# "): + line_style += Style.BRIGHT + else: + line = re.sub( + r"(? str: + legal_text = """ +## DISCLAIMER AND INDEMNIFICATION AGREEMENT +### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT. + +## Introduction +AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences. 
+ +## No Liability for Actions of the System +The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions. + +## User Responsibility and Respondeat Superior Liability +As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your +behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability. + +## Indemnification +By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences. + """ # noqa: E501 + return legal_text + + +def print_motd(config: "Config", logger: logging.Logger): + motd, is_new_motd = get_latest_bulletin() + if motd: + motd = markdown_to_ansi_style(motd) + for motd_line in motd.split("\n"): + logger.info( + extra={ + "title": "NEWS:", + "title_color": Fore.GREEN, + "preserve_color": True, + }, + msg=motd_line, + ) + if is_new_motd and not config.chat_messages_enabled: + input( + Fore.MAGENTA + + Style.BRIGHT + + "NEWS: Bulletin was updated! Press Enter to continue..." 
+ + Style.RESET_ALL + ) + + +def print_git_branch_info(logger: logging.Logger): + git_branch = get_current_git_branch() + if git_branch and git_branch != "master": + logger.warning( + f"You are running on `{git_branch}` branch" + " - this is not a supported branch." + ) + + +def print_python_version_info(logger: logging.Logger): + if sys.version_info < (3, 10): + logger.error( + "WARNING: You are running on an older version of Python. " + "Some people have observed problems with certain " + "parts of AutoGPT with this version. " + "Please consider upgrading to Python 3.10 or higher.", + ) + + +ENV_FILE_PATH = Path(__file__).parent.parent.parent / ".env" + + +def env_file_exists() -> bool: + return ENV_FILE_PATH.is_file() + + +def set_env_config_value(key: str, value: str) -> None: + """Sets the specified env variable and updates it in .env as well""" + os.environ[key] = value + + with ENV_FILE_PATH.open("r+") as file: + lines = file.readlines() + file.seek(0) + key_already_in_file = False + for line in lines: + if re.match(rf"^(?:# )?{key}=.*$", line): + file.write(f"{key}={value}\n") + key_already_in_file = True + else: + file.write(line) + + if not key_already_in_file: + file.write(f"{key}={value}\n") + + file.truncate() + + +def is_port_free(port: int, host: str = "127.0.0.1"): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + try: + s.bind((host, port)) # Try to bind to the port + return True # If successful, the port is free + except OSError: + return False # If failed, the port is likely in use diff --git a/autogpts/autogpt/autogpt/command_decorator.py b/autogpts/autogpt/autogpt/command_decorator.py new file mode 100644 index 000000000000..f53c1ad85e99 --- /dev/null +++ b/autogpts/autogpt/autogpt/command_decorator.py @@ -0,0 +1,70 @@ +from __future__ import annotations + +import functools +import inspect +from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, ParamSpec, TypeVar + +if TYPE_CHECKING: + from autogpt.agents.base import 
BaseAgent + from autogpt.config import Config + +from autogpt.core.utils.json_schema import JSONSchema +from autogpt.models.command import Command, CommandOutput, CommandParameter + +# Unique identifier for AutoGPT commands +AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command" + +P = ParamSpec("P") +CO = TypeVar("CO", bound=CommandOutput) + + +def command( + name: str, + description: str, + parameters: dict[str, JSONSchema], + enabled: Literal[True] | Callable[[Config], bool] = True, + disabled_reason: Optional[str] = None, + aliases: list[str] = [], + available: bool | Callable[[BaseAgent], bool] = True, +) -> Callable[[Callable[P, CO]], Callable[P, CO]]: + """ + The command decorator is used to create Command objects from ordinary functions. + """ + + def decorator(func: Callable[P, CO]) -> Callable[P, CO]: + typed_parameters = [ + CommandParameter( + name=param_name, + spec=spec, + ) + for param_name, spec in parameters.items() + ] + cmd = Command( + name=name, + description=description, + method=func, + parameters=typed_parameters, + enabled=enabled, + disabled_reason=disabled_reason, + aliases=aliases, + available=available, + ) + + if inspect.iscoroutinefunction(func): + + @functools.wraps(func) + async def wrapper(*args: P.args, **kwargs: P.kwargs) -> Any: + return await func(*args, **kwargs) + + else: + + @functools.wraps(func) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> Any: + return func(*args, **kwargs) + + setattr(wrapper, "command", cmd) + setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True) + + return wrapper + + return decorator diff --git a/autogpts/autogpt/autogpt/commands/__init__.py b/autogpts/autogpt/autogpt/commands/__init__.py new file mode 100644 index 000000000000..861da27dc637 --- /dev/null +++ b/autogpts/autogpt/autogpt/commands/__init__.py @@ -0,0 +1,9 @@ +COMMAND_CATEGORIES = [ + "autogpt.commands.execute_code", + "autogpt.commands.file_operations", + "autogpt.commands.user_interaction", + "autogpt.commands.web_search", + 
"autogpt.commands.web_selenium", + "autogpt.commands.system", + "autogpt.commands.image_gen", +] diff --git a/autogpts/autogpt/autogpt/commands/decorators.py b/autogpts/autogpt/autogpt/commands/decorators.py new file mode 100644 index 000000000000..39793a25a1b0 --- /dev/null +++ b/autogpts/autogpt/autogpt/commands/decorators.py @@ -0,0 +1,82 @@ +import functools +import logging +import re +from pathlib import Path +from typing import Callable, ParamSpec, TypeVar + +from autogpt.agents.agent import Agent + +P = ParamSpec("P") +T = TypeVar("T") + +logger = logging.getLogger(__name__) + + +def sanitize_path_arg( + arg_name: str, make_relative: bool = False +) -> Callable[[Callable[P, T]], Callable[P, T]]: + """Sanitizes the specified path (str | Path) argument, resolving it to a Path""" + + def decorator(func: Callable) -> Callable: + # Get position of path parameter, in case it is passed as a positional argument + try: + arg_index = list(func.__annotations__.keys()).index(arg_name) + except ValueError: + raise TypeError( + f"Sanitized parameter '{arg_name}' absent or not annotated" + f" on function '{func.__name__}'" + ) + + # Get position of agent parameter, in case it is passed as a positional argument + try: + agent_arg_index = list(func.__annotations__.keys()).index("agent") + except ValueError: + raise TypeError( + f"Parameter 'agent' absent or not annotated" + f" on function '{func.__name__}'" + ) + + @functools.wraps(func) + def wrapper(*args, **kwargs): + logger.debug(f"Sanitizing arg '{arg_name}' on function '{func.__name__}'") + + # Get Agent from the called function's arguments + agent = kwargs.get( + "agent", len(args) > agent_arg_index and args[agent_arg_index] + ) + if not isinstance(agent, Agent): + raise RuntimeError("Could not get Agent from decorated command's args") + + # Sanitize the specified path argument, if one is given + given_path: str | Path | None = kwargs.get( + arg_name, len(args) > arg_index and args[arg_index] or None + ) + if 
given_path: + if type(given_path) is str: + # Fix workspace path from output in docker environment + given_path = re.sub(r"^\/workspace", ".", given_path) + + if given_path in {"", "/", "."}: + sanitized_path = agent.workspace.root + else: + sanitized_path = agent.workspace.get_path(given_path) + + # Make path relative if possible + if make_relative and sanitized_path.is_relative_to( + agent.workspace.root + ): + sanitized_path = sanitized_path.relative_to(agent.workspace.root) + + if arg_name in kwargs: + kwargs[arg_name] = sanitized_path + else: + # args is an immutable tuple; must be converted to a list to update + arg_list = list(args) + arg_list[arg_index] = sanitized_path + args = tuple(arg_list) + + return func(*args, **kwargs) + + return wrapper + + return decorator diff --git a/autogpts/autogpt/autogpt/commands/execute_code.py b/autogpts/autogpt/autogpt/commands/execute_code.py new file mode 100644 index 000000000000..a0ef57179b2b --- /dev/null +++ b/autogpts/autogpt/autogpt/commands/execute_code.py @@ -0,0 +1,387 @@ +"""Commands to execute code""" + +import logging +import os +import shlex +import subprocess +from pathlib import Path +from tempfile import NamedTemporaryFile + +import docker +from docker.errors import DockerException, ImageNotFound, NotFound +from docker.models.containers import Container as DockerContainer + +from autogpt.agents.agent import Agent +from autogpt.agents.utils.exceptions import ( + CodeExecutionError, + CommandExecutionError, + InvalidArgumentError, + OperationNotAllowedError, +) +from autogpt.command_decorator import command +from autogpt.config import Config +from autogpt.core.utils.json_schema import JSONSchema + +from .decorators import sanitize_path_arg + +COMMAND_CATEGORY = "execute_code" +COMMAND_CATEGORY_TITLE = "Execute Code" + + +logger = logging.getLogger(__name__) + +ALLOWLIST_CONTROL = "allowlist" +DENYLIST_CONTROL = "denylist" + + +def we_are_running_in_a_docker_container() -> bool: + """Check if we are running 
def we_are_running_in_a_docker_container() -> bool:
    """Check if we are running in a Docker container

    Returns:
        bool: True if we are running in a Docker container, False otherwise
    """
    # Docker creates /.dockerenv in every container's root filesystem
    return os.path.exists("/.dockerenv")


def is_docker_available() -> bool:
    """Check if Docker is available and supports Linux containers

    Returns:
        bool: True if Docker is available and supports Linux containers, False otherwise
    """
    try:
        client = docker.from_env()
        docker_info = client.info()
        return docker_info["OSType"] == "linux"
    except Exception:
        # Broad catch on purpose: any failure (no daemon, no permission,
        # docker not installed) simply means Docker is unusable here.
        return False


@command(
    "execute_python_code",
    "Executes the given Python code inside a single-use Docker container"
    " with access to your workspace folder",
    {
        "code": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The Python code to run",
            required=True,
        ),
    },
    disabled_reason="To execute python code agent "
    "must be running in a Docker container or "
    "Docker must be available on the system.",
    available=we_are_running_in_a_docker_container() or is_docker_available(),
)
def execute_python_code(code: str, agent: Agent) -> str:
    """
    Create and execute a Python file in a Docker container and return the STDOUT of the
    executed code.

    If the code generates any data that needs to be captured, use a print statement.

    Args:
        code (str): The Python code to run.
        agent (Agent): The Agent executing the command.

    Returns:
        str: The STDOUT captured from the code when it ran.
    """
    # The temp file is created inside the workspace so the sandbox container
    # (which mounts the workspace) can see it; close() removes it again.
    tmp_code_file = NamedTemporaryFile(
        "w", dir=agent.workspace.root, suffix=".py", encoding="utf-8"
    )
    tmp_code_file.write(code)
    tmp_code_file.flush()

    try:
        return execute_python_file(tmp_code_file.name, agent)  # type: ignore
    except Exception as e:
        # Re-wrap any failure so callers see a uniform command error type
        raise CommandExecutionError(*e.args)
    finally:
        tmp_code_file.close()
@command(
    "execute_python_file",
    "Execute an existing Python file inside a single-use Docker container"
    " with access to your workspace folder",
    {
        "filename": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The name of the file to execute",
            required=True,
        ),
        "args": JSONSchema(
            type=JSONSchema.Type.ARRAY,
            description="The (command line) arguments to pass to the script",
            required=False,
            items=JSONSchema(type=JSONSchema.Type.STRING),
        ),
    },
    disabled_reason="To execute python code agent "
    "must be running in a Docker container or "
    "Docker must be available on the system.",
    available=we_are_running_in_a_docker_container() or is_docker_available(),
)
@sanitize_path_arg("filename")
def execute_python_file(
    filename: Path, agent: Agent, args: list[str] | str = []
) -> str:
    """Execute a Python file in a Docker container and return the output

    Args:
        filename (Path): The name of the file to execute
        args (list, optional): The arguments with which to run the python script

    Returns:
        str: The output of the file

    Raises:
        InvalidArgumentError: If `filename` is not a .py file.
        FileNotFoundError: If the file does not exist.
        CodeExecutionError: If the script exits with a non-zero status.
        CommandExecutionError: If the sandbox container cannot be run.
    """
    logger.info(
        f"Executing python file '{filename.name}' "
        f"in working directory '{agent.workspace.root}'"
    )

    if isinstance(args, str):
        args = args.split()  # Convert space-separated string to a list

    if not str(filename).endswith(".py"):
        raise InvalidArgumentError("Invalid file type. Only .py files are allowed.")

    file_path = filename
    if not file_path.is_file():
        # Mimic the response that you get from the command line to make it
        # intuitively understandable for the LLM
        raise FileNotFoundError(
            f"python: can't open file '{filename}': [Errno 2] No such file or directory"
        )

    if we_are_running_in_a_docker_container():
        # Already sandboxed: run the script directly in this container
        logger.debug(
            "AutoGPT is running in a Docker container; "
            f"executing {file_path} directly..."
        )
        result = subprocess.run(
            ["python", "-B", str(file_path)] + args,
            capture_output=True,
            encoding="utf8",
            cwd=str(agent.workspace.root),
        )
        if result.returncode == 0:
            return result.stdout
        else:
            raise CodeExecutionError(result.stderr)

    logger.debug("AutoGPT is not running in a Docker container")
    try:
        assert agent.state.agent_id, "Need Agent ID to attach Docker container"

        client = docker.from_env()
        # You can replace this with the desired Python image/version
        # You can find available Python images on Docker Hub:
        # https://hub.docker.com/_/python
        image_name = "python:3-alpine"
        container_is_fresh = False
        # One reusable sandbox container per agent
        container_name = f"{agent.state.agent_id}_sandbox"
        try:
            container: DockerContainer = client.containers.get(
                container_name
            )  # type: ignore
        except NotFound:
            try:
                client.images.get(image_name)
                logger.debug(f"Image '{image_name}' found locally")
            except ImageNotFound:
                logger.info(
                    f"Image '{image_name}' not found locally,"
                    " pulling from Docker Hub..."
                )
                # Use the low-level API to stream the pull response
                low_level_client = docker.APIClient()
                for line in low_level_client.pull(image_name, stream=True, decode=True):
                    # Print the status and progress, if available
                    status = line.get("status")
                    progress = line.get("progress")
                    if status and progress:
                        logger.info(f"{status}: {progress}")
                    elif status:
                        logger.info(status)

            logger.debug(f"Creating new {image_name} container...")
            container: DockerContainer = client.containers.run(
                image_name,
                ["sleep", "60"],  # Max 60 seconds to prevent permanent hangs
                volumes={
                    str(agent.workspace.root): {
                        "bind": "/workspace",
                        "mode": "rw",
                    }
                },
                working_dir="/workspace",
                stderr=True,
                stdout=True,
                detach=True,
                name=container_name,
            )  # type: ignore
            container_is_fresh = True

        if not container.status == "running":
            container.start()
        elif not container_is_fresh:
            # Restart to reset the 60s sleep timeout of an existing container
            container.restart()

        logger.debug(f"Running {file_path} in container {container.name}...")
        exec_result = container.exec_run(
            [
                "python",
                "-B",
                file_path.relative_to(agent.workspace.root).as_posix(),
            ]
            + args,
            stderr=True,
            stdout=True,
        )

        if exec_result.exit_code != 0:
            raise CodeExecutionError(exec_result.output.decode("utf-8"))

        return exec_result.output.decode("utf-8")

    except DockerException as e:
        logger.warning(
            "Could not run the script in a container. "
            "If you haven't already, please install Docker: "
            "https://docs.docker.com/get-docker/"
        )
        raise CommandExecutionError(f"Could not run the script in a container: {e}")


def validate_command(command_line: str, config: Config) -> tuple[bool, bool]:
    """Check whether a command is allowed and whether it may be executed in a shell.

    If shell command control is enabled, we disallow executing in a shell, because
    otherwise the model could easily circumvent the command filter using shell features.

    Args:
        command_line (str): The command line to validate
        config (Config): The application config including shell command control settings

    Returns:
        bool: True if the command is allowed, False otherwise
        bool: True if the command may be executed in a shell, False otherwise
    """
    if not command_line:
        return False, False

    # Filter on the executable name (first token) only
    command_name = shlex.split(command_line)[0]

    if config.shell_command_control == ALLOWLIST_CONTROL:
        return command_name in config.shell_allowlist, False
    elif config.shell_command_control == DENYLIST_CONTROL:
        return command_name not in config.shell_denylist, False
    else:
        return True, True


@command(
    "execute_shell",
    "Execute a Shell Command, non-interactive commands only",
    {
        "command_line": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The command line to execute",
            required=True,
        )
    },
    enabled=lambda config: config.execute_local_commands,
    disabled_reason="You are not allowed to run local shell commands. To execute"
    " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
    "in your config file: .env - do not attempt to bypass the restriction.",
)
def execute_shell(command_line: str, agent: Agent) -> str:
    """Execute a shell command and return the output

    Args:
        command_line (str): The command line to execute

    Returns:
        str: The output of the command
    """
    allow_execute, allow_shell = validate_command(command_line, agent.legacy_config)
    if not allow_execute:
        logger.info(f"Command '{command_line}' not allowed")
        raise OperationNotAllowedError("This shell command is not allowed.")

    current_dir = Path.cwd()
    # Change dir into workspace if necessary
    if not current_dir.is_relative_to(agent.workspace.root):
        os.chdir(agent.workspace.root)

    logger.info(
        f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
    )

    result = subprocess.run(
        command_line if allow_shell else shlex.split(command_line),
        capture_output=True,
        shell=allow_shell,
    )
    # NOTE(review): .decode() assumes UTF-8 output; confirm non-UTF-8 tools
    # cannot crash this
    output = f"STDOUT:\n{result.stdout.decode()}\nSTDERR:\n{result.stderr.decode()}"

    # Change back to whatever the prior working dir was
    os.chdir(current_dir)

    return output
@command(
    "execute_shell_popen",
    "Execute a Shell Command, non-interactive commands only",
    {
        "command_line": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The command line to execute",
            required=True,
        )
    },
    # Keyword args for consistency with `execute_shell` above
    enabled=lambda config: config.execute_local_commands,
    disabled_reason="You are not allowed to run local shell commands. To execute"
    " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
    "in your config. Do not attempt to bypass the restriction.",
)
def execute_shell_popen(command_line: str, agent: Agent) -> str:
    """Execute a shell command with Popen and returns an english description
    of the event and the process id

    Args:
        command_line (str): The command line to execute

    Returns:
        str: Description of the fact that the process started and its id
    """
    allow_execute, allow_shell = validate_command(command_line, agent.legacy_config)
    if not allow_execute:
        logger.info(f"Command '{command_line}' not allowed")
        raise OperationNotAllowedError("This shell command is not allowed.")

    current_dir = Path.cwd()
    # Change dir into workspace if necessary
    if not current_dir.is_relative_to(agent.workspace.root):
        os.chdir(agent.workspace.root)

    logger.info(
        f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
    )

    # Fire-and-forget: detach the process and discard its output
    do_not_show_output = subprocess.DEVNULL
    process = subprocess.Popen(
        command_line if allow_shell else shlex.split(command_line),
        shell=allow_shell,
        stdout=do_not_show_output,
        stderr=do_not_show_output,
    )

    # Change back to whatever the prior working dir was
    os.chdir(current_dir)

    return f"Subprocess started with PID:'{str(process.pid)}'"
index 000000000000..004ed1039698 --- /dev/null +++ b/autogpts/autogpt/autogpt/commands/file_context.py @@ -0,0 +1,131 @@ +"""Commands to perform operations on files""" + +from __future__ import annotations + +import contextlib +from pathlib import Path +from typing import TYPE_CHECKING + +from autogpt.agents.features.context import ContextMixin, get_agent_context +from autogpt.agents.utils.exceptions import ( + CommandExecutionError, + DuplicateOperationError, +) +from autogpt.command_decorator import command +from autogpt.core.utils.json_schema import JSONSchema +from autogpt.models.context_item import FileContextItem, FolderContextItem + +from .decorators import sanitize_path_arg + +COMMAND_CATEGORY = "file_operations" +COMMAND_CATEGORY_TITLE = "File Operations" + + +if TYPE_CHECKING: + from autogpt.agents import Agent, BaseAgent + + +def agent_implements_context(agent: BaseAgent) -> bool: + return isinstance(agent, ContextMixin) + + +@command( + "open_file", + "Opens a file for editing or continued viewing;" + " creates it if it does not exist yet. 
" + "Note: If you only need to read or write a file once, use `write_to_file` instead.", + { + "file_path": JSONSchema( + type=JSONSchema.Type.STRING, + description="The path of the file to open", + required=True, + ) + }, + available=agent_implements_context, +) +@sanitize_path_arg("file_path") +def open_file(file_path: Path, agent: Agent) -> tuple[str, FileContextItem]: + """Open a file and return a context item + + Args: + file_path (Path): The path of the file to open + + Returns: + str: A status message indicating what happened + FileContextItem: A ContextItem representing the opened file + """ + # Try to make the file path relative + relative_file_path = None + with contextlib.suppress(ValueError): + relative_file_path = file_path.relative_to(agent.workspace.root) + + assert (agent_context := get_agent_context(agent)) is not None + + created = False + if not file_path.exists(): + file_path.touch() + created = True + elif not file_path.is_file(): + raise CommandExecutionError(f"{file_path} exists but is not a file") + + file_path = relative_file_path or file_path + + file = FileContextItem( + file_path_in_workspace=file_path, + workspace_path=agent.workspace.root, + ) + if file in agent_context: + raise DuplicateOperationError(f"The file {file_path} is already open") + + return ( + f"File {file_path}{' created,' if created else ''} has been opened" + " and added to the context ✅", + file, + ) + + +@command( + "open_folder", + "Open a folder to keep track of its content", + { + "path": JSONSchema( + type=JSONSchema.Type.STRING, + description="The path of the folder to open", + required=True, + ) + }, + available=agent_implements_context, +) +@sanitize_path_arg("path") +def open_folder(path: Path, agent: Agent) -> tuple[str, FolderContextItem]: + """Open a folder and return a context item + + Args: + path (Path): The path of the folder to open + + Returns: + str: A status message indicating what happened + FolderContextItem: A ContextItem representing the opened 
folder + """ + # Try to make the path relative + relative_path = None + with contextlib.suppress(ValueError): + relative_path = path.relative_to(agent.workspace.root) + + assert (agent_context := get_agent_context(agent)) is not None + + if not path.exists(): + raise FileNotFoundError(f"open_folder {path} failed: no such file or directory") + elif not path.is_dir(): + raise CommandExecutionError(f"{path} exists but is not a folder") + + path = relative_path or path + + folder = FolderContextItem( + path_in_workspace=path, + workspace_path=agent.workspace.root, + ) + if folder in agent_context: + raise DuplicateOperationError(f"The folder {path} is already open") + + return f"Folder {path} has been opened and added to the context ✅", folder diff --git a/autogpts/autogpt/autogpt/commands/file_operations.py b/autogpts/autogpt/autogpt/commands/file_operations.py new file mode 100644 index 000000000000..55149216fb21 --- /dev/null +++ b/autogpts/autogpt/autogpt/commands/file_operations.py @@ -0,0 +1,241 @@ +"""Commands to perform operations on files""" + +from __future__ import annotations + +import hashlib +import logging +import os +import os.path +from pathlib import Path +from typing import Iterator, Literal + +from autogpt.agents.agent import Agent +from autogpt.agents.utils.exceptions import DuplicateOperationError +from autogpt.command_decorator import command +from autogpt.core.utils.json_schema import JSONSchema +from autogpt.memory.vector import MemoryItemFactory, VectorMemory + +from .decorators import sanitize_path_arg +from .file_operations_utils import decode_textual_file + +COMMAND_CATEGORY = "file_operations" +COMMAND_CATEGORY_TITLE = "File Operations" + + +from .file_context import open_file, open_folder # NOQA + +logger = logging.getLogger(__name__) + +Operation = Literal["write", "append", "delete"] + + +def text_checksum(text: str) -> str: + """Get the hex checksum for the given text.""" + return hashlib.md5(text.encode("utf-8")).hexdigest() + + +def 
operations_from_log( + logs: list[str], +) -> Iterator[ + tuple[Literal["write", "append"], str, str] | tuple[Literal["delete"], str, None] +]: + """Parse logs and return a tuple containing the log entries""" + for line in logs: + line = line.replace("File Operation Logger", "").strip() + if not line: + continue + operation, tail = line.split(": ", maxsplit=1) + operation = operation.strip() + if operation in ("write", "append"): + path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1)) + yield (operation, path, checksum) + elif operation == "delete": + yield (operation, tail.strip(), None) + + +def file_operations_state(logs: list[str]) -> dict[str, str]: + """Iterates over the operations and returns the expected state. + + Constructs a dictionary that maps each file path written + or appended to its checksum. Deleted files are + removed from the dictionary. + + Returns: + A dictionary mapping file paths to their checksums. + + Raises: + FileNotFoundError: If file_manager.file_ops_log_path is not found. + ValueError: If the log file content is not in the expected format. 
+ """ + state = {} + for operation, path, checksum in operations_from_log(logs): + if operation in ("write", "append"): + state[path] = checksum + elif operation == "delete": + del state[path] + return state + + +@sanitize_path_arg("file_path", make_relative=True) +def is_duplicate_operation( + operation: Operation, file_path: Path, agent: Agent, checksum: str | None = None +) -> bool: + """Check if the operation has already been performed + + Args: + operation: The operation to check for + file_path: The name of the file to check for + agent: The agent + checksum: The checksum of the contents to be written + + Returns: + True if the operation has already been performed on the file + """ + state = file_operations_state(agent.get_file_operation_lines()) + if operation == "delete" and file_path.as_posix() not in state: + return True + if operation == "write" and state.get(file_path.as_posix()) == checksum: + return True + return False + + +@sanitize_path_arg("file_path", make_relative=True) +async def log_operation( + operation: Operation, + file_path: str | Path, + agent: Agent, + checksum: str | None = None, +) -> None: + """Log the file operation to the file_logger.log + + Args: + operation: The operation to log + file_path: The name of the file the operation was performed on + checksum: The checksum of the contents to be written + """ + log_entry = ( + f"{operation}: " + f"{file_path.as_posix() if isinstance(file_path, Path) else file_path}" + ) + if checksum is not None: + log_entry += f" #{checksum}" + logger.debug(f"Logging file operation: {log_entry}") + await agent.log_file_operation(log_entry) + + +@command( + "read_file", + "Read an existing file", + { + "filename": JSONSchema( + type=JSONSchema.Type.STRING, + description="The path of the file to read", + required=True, + ) + }, +) +def read_file(filename: str | Path, agent: Agent) -> str: + """Read a file and return the contents + + Args: + filename (Path): The name of the file to read + + Returns: + str: 
@command(
    "read_file",
    "Read an existing file",
    {
        "filename": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The path of the file to read",
            required=True,
        )
    },
)
def read_file(filename: str | Path, agent: Agent) -> str:
    """Read a file and return the contents

    Args:
        filename (Path): The name of the file to read

    Returns:
        str: The contents of the file
    """
    file = agent.workspace.open_file(filename, binary=True)
    # Decode based on the file extension; unknown extensions fall back to text
    content = decode_textual_file(file, os.path.splitext(filename)[1], logger)

    # # TODO: invalidate/update memory when file is edited
    # file_memory = MemoryItem.from_text_file(content, str(filename), agent.config)
    # if len(file_memory.chunks) > 1:
    #     return file_memory.summary

    return content


def ingest_file(
    filename: str,
    memory: VectorMemory,
) -> None:
    """
    Ingest a file by reading its content, splitting it into chunks with a specified
    maximum length and overlap, and adding the chunks to the memory storage.

    Args:
        filename: The name of the file to ingest
        memory: An object with an add() method to store the chunks in memory
    """
    try:
        logger.info(f"Ingesting file {filename}")
        # NOTE(review): read_file() requires an `agent` argument that is not
        # available here, so this call raises TypeError as written — confirm
        # whether ingest_file is still used anywhere or is stale code.
        content = read_file(filename)

        # TODO: differentiate between different types of files
        file_memory = MemoryItemFactory.from_text_file(content, filename)
        logger.debug(f"Created memory: {file_memory.dump(True)}")
        memory.add(file_memory)

        logger.info(f"Ingested {len(file_memory.e_chunks)} chunks from {filename}")
    except Exception as err:
        logger.warning(f"Error while ingesting file '{filename}': {err}")


@command(
    "write_file",
    "Write a file, creating it if necessary. If the file exists, it is overwritten.",
    {
        "filename": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The name of the file to write to",
            required=True,
        ),
        "contents": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The contents to write to the file",
            required=True,
        ),
    },
    aliases=["create_file"],
)
async def write_to_file(filename: str | Path, contents: str, agent: Agent) -> str:
    """Write contents to a file

    Args:
        filename (Path): The name of the file to write to
        contents (str): The contents to write to the file

    Returns:
        str: A message indicating success or failure

    Raises:
        DuplicateOperationError: If an identical write was already logged.
    """
    checksum = text_checksum(contents)
    if is_duplicate_operation("write", Path(filename), agent, checksum):
        raise DuplicateOperationError(f"File {filename} has already been updated.")

    # Create the parent directory if needed before writing
    if directory := os.path.dirname(filename):
        agent.workspace.make_dir(directory)
    await agent.workspace.write_file(filename, contents)
    await log_operation("write", filename, agent, checksum)
    return f"File {filename} has been written successfully."
@command(
    "list_folder",
    "List the items in a folder",
    {
        "folder": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The folder to list files in",
            required=True,
        )
    },
)
def list_folder(folder: str | Path, agent: Agent) -> list[str]:
    """Lists files in a folder recursively

    Args:
        folder (Path): The folder to search in

    Returns:
        list[str]: A list of files found in the folder
    """
    return [str(p) for p in agent.workspace.list_files(folder)]

# ─── file: autogpts/autogpt/autogpt/commands/file_operations_utils.py ───
import json
import logging
from abc import ABC, abstractmethod
from typing import BinaryIO

import charset_normalizer
import docx
import pypdf
import yaml
from bs4 import BeautifulSoup
from pylatexenc.latex2text import LatexNodes2Text

logger = logging.getLogger(__name__)


class ParserStrategy(ABC):
    """Interface for extracting text from a binary file stream."""

    @abstractmethod
    def read(self, file: BinaryIO) -> str:
        ...


class TXTParser(ParserStrategy):
    """Plain-text files; the encoding is detected with charset_normalizer."""

    def read(self, file: BinaryIO) -> str:
        charset_match = charset_normalizer.from_bytes(file.read()).best()
        # NOTE(review): best() can return None when no encoding matches —
        # confirm this cannot happen for the inputs routed here
        logger.debug(
            f"Reading {getattr(file, 'name', 'file')} "
            f"with encoding '{charset_match.encoding}'"
        )
        return str(charset_match)


class PDFParser(ParserStrategy):
    """PDF files, via pypdf page-by-page text extraction."""

    def read(self, file: BinaryIO) -> str:
        parser = pypdf.PdfReader(file)
        text = ""
        for page_idx in range(len(parser.pages)):
            text += parser.pages[page_idx].extract_text()
        return text


class DOCXParser(ParserStrategy):
    """Word documents, via python-docx paragraph concatenation."""

    def read(self, file: BinaryIO) -> str:
        doc_file = docx.Document(file)
        text = ""
        for para in doc_file.paragraphs:
            text += para.text
        return text


class JSONParser(ParserStrategy):
    """JSON files; parsed and rendered with str()."""

    def read(self, file: BinaryIO) -> str:
        data = json.load(file)
        text = str(data)
        return text


class XMLParser(ParserStrategy):
    """XML files; text content extracted with BeautifulSoup."""

    def read(self, file: BinaryIO) -> str:
        soup = BeautifulSoup(file, "xml")
        text = soup.get_text()
        return text


class YAMLParser(ParserStrategy):
    """YAML files; safe-loaded and rendered with str()."""

    def read(self, file: BinaryIO) -> str:
        data = yaml.load(file, Loader=yaml.SafeLoader)
        text = str(data)
        return text


class HTMLParser(ParserStrategy):
    """HTML files; text content extracted with BeautifulSoup."""

    def read(self, file: BinaryIO) -> str:
        soup = BeautifulSoup(file, "html.parser")
        text = soup.get_text()
        return text


class LaTeXParser(ParserStrategy):
    """LaTeX sources; converted to plain text with pylatexenc."""

    def read(self, file: BinaryIO) -> str:
        latex = file.read().decode()
        text = LatexNodes2Text().latex_to_text(latex)
        return text


class FileContext:
    """Holds the active ParserStrategy and applies it to a file."""

    def __init__(self, parser: ParserStrategy, logger: logging.Logger):
        self.parser = parser
        self.logger = logger

    def set_parser(self, parser: ParserStrategy) -> None:
        self.logger.debug(f"Setting Context Parser to {parser}")
        self.parser = parser

    def decode_file(self, file: BinaryIO) -> str:
        self.logger.debug(
            f"Reading {getattr(file, 'name', 'file')} with parser {self.parser}"
        )
        return self.parser.read(file)


# Maps a (lowercased) file extension to the parser that handles it
extension_to_parser = {
    ".txt": TXTParser(),
    ".md": TXTParser(),
    ".markdown": TXTParser(),
    ".csv": TXTParser(),
    ".pdf": PDFParser(),
    ".docx": DOCXParser(),
    ".json": JSONParser(),
    ".xml": XMLParser(),
    ".yaml": YAMLParser(),
    ".yml": YAMLParser(),
    ".html": HTMLParser(),
    ".htm": HTMLParser(),
    ".xhtml": HTMLParser(),
    ".tex": LaTeXParser(),
}


def is_file_binary_fn(file: BinaryIO) -> bool:
    """Heuristically detect binary content by checking for NUL bytes.

    Args:
        file: Readable binary stream; its position is restored afterwards.

    Returns:
        bool: True if the content contains a NUL byte (likely binary).
    """
    file_data = file.read()
    file.seek(0)
    if b"\x00" in file_data:
        return True
    return False


def decode_textual_file(file: BinaryIO, ext: str, logger: logging.Logger) -> str:
    """Decode a (textual) binary stream to text using the parser for `ext`.

    Unknown extensions fall back to the plain-text parser so scripts and code
    files are still readable.

    Raises:
        ValueError: If the file is not readable, or is an unsupported binary format.
    """
    if not file.readable():
        raise ValueError(f"{repr(file)} is not readable")

    parser = extension_to_parser.get(ext.lower())
    if not parser:
        if is_file_binary_fn(file):
            raise ValueError(f"Unsupported binary file format: {ext}")
        # fallback to txt file parser (to support script and code files loading)
        parser = TXTParser()
    file_context = FileContext(parser, logger)
    return file_context.decode_file(file)
# ─── file: autogpts/autogpt/autogpt/commands/git_operations.py ───
"""Commands to perform Git operations"""

from pathlib import Path

from git.repo import Repo

from autogpt.agents.agent import Agent
from autogpt.agents.utils.exceptions import CommandExecutionError
from autogpt.command_decorator import command
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.url_utils.validators import validate_url

from .decorators import sanitize_path_arg

COMMAND_CATEGORY = "git_operations"
COMMAND_CATEGORY_TITLE = "Git Operations"


@command(
    "clone_repository",
    "Clones a Repository",
    {
        "url": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The URL of the repository to clone",
            required=True,
        ),
        "clone_path": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The path to clone the repository to",
            required=True,
        ),
    },
    lambda config: bool(config.github_username and config.github_api_key),
    "Configure github_username and github_api_key.",
)
@sanitize_path_arg("clone_path")
@validate_url
def clone_repository(url: str, clone_path: Path, agent: Agent) -> str:
    """Clone a GitHub repository locally.

    Args:
        url (str): The URL of the repository to clone.
        clone_path (Path): The path to clone the repository to.

    Returns:
        str: The result of the clone operation.

    Raises:
        CommandExecutionError: If the clone fails.
    """
    # Embed the configured GitHub credentials into the clone URL
    split_url = url.split("//")
    auth_repo_url = f"//{agent.legacy_config.github_username}:{agent.legacy_config.github_api_key}@".join(  # noqa: E501
        split_url
    )
    try:
        Repo.clone_from(url=auth_repo_url, to_path=clone_path)
    except Exception as e:
        # Redact the credential-bearing URL so the API key cannot leak into
        # error output surfaced to the user/LLM
        raise CommandExecutionError(
            f"Could not clone repo: {str(e).replace(auth_repo_url, url)}"
        )

    return f"""Cloned {url} to {clone_path}"""
= "Text to Image" + + +logger = logging.getLogger(__name__) + + +@command( + "generate_image", + "Generates an Image", + { + "prompt": JSONSchema( + type=JSONSchema.Type.STRING, + description="The prompt used to generate the image", + required=True, + ), + }, + lambda config: bool(config.image_provider), + "Requires a image provider to be set.", +) +def generate_image(prompt: str, agent: Agent, size: int = 256) -> str: + """Generate an image from a prompt. + + Args: + prompt (str): The prompt to use + size (int, optional): The size of the image. Defaults to 256. + Not supported by HuggingFace. + + Returns: + str: The filename of the image + """ + filename = agent.workspace.root / f"{str(uuid.uuid4())}.jpg" + + # DALL-E + if agent.legacy_config.image_provider == "dalle": + return generate_image_with_dalle(prompt, filename, size, agent) + # HuggingFace + elif agent.legacy_config.image_provider == "huggingface": + return generate_image_with_hf(prompt, filename, agent) + # SD WebUI + elif agent.legacy_config.image_provider == "sdwebui": + return generate_image_with_sd_webui(prompt, filename, agent, size) + return "No Image Provider Set" + + +def generate_image_with_hf(prompt: str, output_file: Path, agent: Agent) -> str: + """Generate an image with HuggingFace's API. + + Args: + prompt (str): The prompt to use + filename (Path): The filename to save the image to + + Returns: + str: The filename of the image + """ + API_URL = f"https://api-inference.huggingface.co/models/{agent.legacy_config.huggingface_image_model}" # noqa: E501 + if agent.legacy_config.huggingface_api_token is None: + raise ValueError( + "You need to set your Hugging Face API token in the config file." 
+ ) + headers = { + "Authorization": f"Bearer {agent.legacy_config.huggingface_api_token}", + "X-Use-Cache": "false", + } + + retry_count = 0 + while retry_count < 10: + response = requests.post( + API_URL, + headers=headers, + json={ + "inputs": prompt, + }, + ) + + if response.ok: + try: + image = Image.open(io.BytesIO(response.content)) + logger.info(f"Image Generated for prompt:{prompt}") + image.save(output_file) + return f"Saved to disk: {output_file}" + except Exception as e: + logger.error(e) + break + else: + try: + error = json.loads(response.text) + if "estimated_time" in error: + delay = error["estimated_time"] + logger.debug(response.text) + logger.info("Retrying in", delay) + time.sleep(delay) + else: + break + except Exception as e: + logger.error(e) + break + + retry_count += 1 + + return "Error creating image." + + +def generate_image_with_dalle( + prompt: str, output_file: Path, size: int, agent: Agent +) -> str: + """Generate an image with DALL-E. + + Args: + prompt (str): The prompt to use + filename (Path): The filename to save the image to + size (int): The size of the image + + Returns: + str: The filename of the image + """ + + # Check for supported image sizes + if size not in [256, 512, 1024]: + closest = min([256, 512, 1024], key=lambda x: abs(x - size)) + logger.info( + "DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. " + f"Setting to {closest}, was {size}." 
+ ) + size = closest + + response = OpenAI( + api_key=agent.legacy_config.openai_credentials.api_key.get_secret_value() + ).images.generate( + prompt=prompt, + n=1, + size=f"{size}x{size}", + response_format="b64_json", + ) + + logger.info(f"Image Generated for prompt:{prompt}") + + image_data = b64decode(response.data[0].b64_json) + + with open(output_file, mode="wb") as png: + png.write(image_data) + + return f"Saved to disk: {output_file}" + + +def generate_image_with_sd_webui( + prompt: str, + output_file: Path, + agent: Agent, + size: int = 512, + negative_prompt: str = "", + extra: dict = {}, +) -> str: + """Generate an image with Stable Diffusion webui. + Args: + prompt (str): The prompt to use + filename (str): The filename to save the image to + size (int, optional): The size of the image. Defaults to 256. + negative_prompt (str, optional): The negative prompt to use. Defaults to "". + extra (dict, optional): Extra parameters to pass to the API. Defaults to {}. + Returns: + str: The filename of the image + """ + # Create a session and set the basic auth if needed + s = requests.Session() + if agent.legacy_config.sd_webui_auth: + username, password = agent.legacy_config.sd_webui_auth.split(":") + s.auth = (username, password or "") + + # Generate the images + response = requests.post( + f"{agent.legacy_config.sd_webui_url}/sdapi/v1/txt2img", + json={ + "prompt": prompt, + "negative_prompt": negative_prompt, + "sampler_index": "DDIM", + "steps": 20, + "config_scale": 7.0, + "width": size, + "height": size, + "n_iter": 1, + **extra, + }, + ) + + logger.info(f"Image Generated for prompt: '{prompt}'") + + # Save the image to disk + response = response.json() + b64 = b64decode(response["images"][0].split(",", 1)[0]) + image = Image.open(io.BytesIO(b64)) + image.save(output_file) + + return f"Saved to disk: {output_file}" diff --git a/autogpts/autogpt/autogpt/commands/system.py b/autogpts/autogpt/autogpt/commands/system.py new file mode 100644 index 
000000000000..2d547bd08b2b --- /dev/null +++ b/autogpts/autogpt/autogpt/commands/system.py @@ -0,0 +1,69 @@ +"""Commands to control the internal state of the program""" + +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING + +from autogpt.agents.features.context import get_agent_context +from autogpt.agents.utils.exceptions import AgentFinished, InvalidArgumentError +from autogpt.command_decorator import command +from autogpt.core.utils.json_schema import JSONSchema + +COMMAND_CATEGORY = "system" +COMMAND_CATEGORY_TITLE = "System" + + +if TYPE_CHECKING: + from autogpt.agents.agent import Agent + + +logger = logging.getLogger(__name__) + + +@command( + "finish", + "Use this to shut down once you have completed your task," + " or when there are insurmountable problems that make it impossible" + " for you to finish your task.", + { + "reason": JSONSchema( + type=JSONSchema.Type.STRING, + description="A summary to the user of how the goals were accomplished", + required=True, + ) + }, +) +def finish(reason: str, agent: Agent) -> None: + """ + A function that takes in a string and exits the program + + Parameters: + reason (str): A summary to the user of how the goals were accomplished. + Returns: + A result string from create chat completion. A list of suggestions to + improve the code. 
+ """ + raise AgentFinished(reason) + + +@command( + "hide_context_item", + "Hide an open file, folder or other context item, to save memory.", + { + "number": JSONSchema( + type=JSONSchema.Type.INTEGER, + description="The 1-based index of the context item to hide", + required=True, + ) + }, + available=lambda a: bool(get_agent_context(a)), +) +def close_context_item(number: int, agent: Agent) -> str: + assert (context := get_agent_context(agent)) is not None + + if number > len(context.items) or number == 0: + raise InvalidArgumentError(f"Index {number} out of range") + + context.close(number) + return f"Context item {number} hidden ✅" diff --git a/autogpts/autogpt/autogpt/commands/times.py b/autogpts/autogpt/autogpt/commands/times.py new file mode 100644 index 000000000000..3c9b8a4fc67a --- /dev/null +++ b/autogpts/autogpt/autogpt/commands/times.py @@ -0,0 +1,10 @@ +from datetime import datetime + + +def get_datetime() -> str: + """Return the current date and time + + Returns: + str: The current date and time + """ + return "Current date and time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S") diff --git a/autogpts/autogpt/autogpt/commands/user_interaction.py b/autogpts/autogpt/autogpt/commands/user_interaction.py new file mode 100644 index 000000000000..bd4dd639ceb7 --- /dev/null +++ b/autogpts/autogpt/autogpt/commands/user_interaction.py @@ -0,0 +1,32 @@ +"""Commands to interact with the user""" + +from __future__ import annotations + +from autogpt.agents.agent import Agent +from autogpt.app.utils import clean_input +from autogpt.command_decorator import command +from autogpt.core.utils.json_schema import JSONSchema + +COMMAND_CATEGORY = "user_interaction" +COMMAND_CATEGORY_TITLE = "User Interaction" + + +@command( + "ask_user", + ( + "If you need more details or information regarding the given goals," + " you can ask the user for input" + ), + { + "question": JSONSchema( + type=JSONSchema.Type.STRING, + description="The question or prompt to the user", + 
required=True, + ) + }, + enabled=lambda config: not config.noninteractive_mode, +) +async def ask_user(question: str, agent: Agent) -> str: + print(f"\nQ: {question}") + resp = clean_input(agent.legacy_config, "A:") + return f"The user's answer: '{resp}'" diff --git a/autogpts/autogpt/autogpt/commands/web_search.py b/autogpts/autogpt/autogpt/commands/web_search.py new file mode 100644 index 000000000000..dfcb38b3f57c --- /dev/null +++ b/autogpts/autogpt/autogpt/commands/web_search.py @@ -0,0 +1,169 @@ +"""Commands to search the web with""" + +from __future__ import annotations + +import json +import time + +from duckduckgo_search import DDGS + +from autogpt.agents.agent import Agent +from autogpt.agents.utils.exceptions import ConfigurationError +from autogpt.command_decorator import command +from autogpt.core.utils.json_schema import JSONSchema + +COMMAND_CATEGORY = "web_search" +COMMAND_CATEGORY_TITLE = "Web Search" + + +DUCKDUCKGO_MAX_ATTEMPTS = 3 + + +@command( + "web_search", + "Searches the web", + { + "query": JSONSchema( + type=JSONSchema.Type.STRING, + description="The search query", + required=True, + ) + }, + aliases=["search"], +) +def web_search(query: str, agent: Agent, num_results: int = 8) -> str: + """Return the results of a Google search + + Args: + query (str): The search query. + num_results (int): The number of results to return. + + Returns: + str: The results of the search. + """ + search_results = [] + attempts = 0 + + while attempts < DUCKDUCKGO_MAX_ATTEMPTS: + if not query: + return json.dumps(search_results) + + search_results = DDGS().text(query, max_results=num_results) + + if search_results: + break + + time.sleep(1) + attempts += 1 + + search_results = [ + { + "title": r["title"], + "url": r["href"], + **({"exerpt": r["body"]} if r.get("body") else {}), + } + for r in search_results + ] + + results = ( + "## Search results\n" + # "Read these results carefully." 
+ # " Extract the information you need for your task from the list of results" + # " if possible. Otherwise, choose a webpage from the list to read entirely." + # "\n\n" + ) + "\n\n".join( + f"### \"{r['title']}\"\n" + f"**URL:** {r['url']} \n" + "**Excerpt:** " + (f'"{exerpt}"' if (exerpt := r.get("exerpt")) else "N/A") + for r in search_results + ) + return safe_google_results(results) + + +@command( + "google", + "Google Search", + { + "query": JSONSchema( + type=JSONSchema.Type.STRING, + description="The search query", + required=True, + ) + }, + lambda config: bool(config.google_api_key) + and bool(config.google_custom_search_engine_id), + "Configure google_api_key and custom_search_engine_id.", + aliases=["search"], +) +def google(query: str, agent: Agent, num_results: int = 8) -> str | list[str]: + """Return the results of a Google search using the official Google API + + Args: + query (str): The search query. + num_results (int): The number of results to return. + + Returns: + str: The results of the search. 
+ """ + + from googleapiclient.discovery import build + from googleapiclient.errors import HttpError + + try: + # Get the Google API key and Custom Search Engine ID from the config file + api_key = agent.legacy_config.google_api_key + custom_search_engine_id = agent.legacy_config.google_custom_search_engine_id + + # Initialize the Custom Search API service + service = build("customsearch", "v1", developerKey=api_key) + + # Send the search query and retrieve the results + result = ( + service.cse() + .list(q=query, cx=custom_search_engine_id, num=num_results) + .execute() + ) + + # Extract the search result items from the response + search_results = result.get("items", []) + + # Create a list of only the URLs from the search results + search_results_links = [item["link"] for item in search_results] + + except HttpError as e: + # Handle errors in the API call + error_details = json.loads(e.content.decode()) + + # Check if the error is related to an invalid or missing API key + if error_details.get("error", {}).get( + "code" + ) == 403 and "invalid API key" in error_details.get("error", {}).get( + "message", "" + ): + raise ConfigurationError( + "The provided Google API key is invalid or missing." + ) + raise + # google_result can be a list or a string depending on the search results + + # Return the list of search result URLs + return safe_google_results(search_results_links) + + +def safe_google_results(results: str | list) -> str: + """ + Return the results of a Google search in a safe format. + + Args: + results (str | list): The search results. + + Returns: + str: The results of the search. 
+ """ + if isinstance(results, list): + safe_message = json.dumps( + [result.encode("utf-8", "ignore").decode("utf-8") for result in results] + ) + else: + safe_message = results.encode("utf-8", "ignore").decode("utf-8") + return safe_message diff --git a/autogpts/autogpt/autogpt/commands/web_selenium.py b/autogpts/autogpt/autogpt/commands/web_selenium.py new file mode 100644 index 000000000000..59adb61b4ab6 --- /dev/null +++ b/autogpts/autogpt/autogpt/commands/web_selenium.py @@ -0,0 +1,379 @@ +"""Commands for browsing a website""" + +from __future__ import annotations + +import asyncio +import logging +import re +from pathlib import Path +from sys import platform +from typing import TYPE_CHECKING, Optional, Type +from urllib.request import urlretrieve + +from bs4 import BeautifulSoup +from selenium.common.exceptions import WebDriverException +from selenium.webdriver.chrome.options import Options as ChromeOptions +from selenium.webdriver.chrome.service import Service as ChromeDriverService +from selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver +from selenium.webdriver.common.by import By +from selenium.webdriver.common.options import ArgOptions as BrowserOptions +from selenium.webdriver.edge.options import Options as EdgeOptions +from selenium.webdriver.edge.service import Service as EdgeDriverService +from selenium.webdriver.edge.webdriver import WebDriver as EdgeDriver +from selenium.webdriver.firefox.options import Options as FirefoxOptions +from selenium.webdriver.firefox.service import Service as GeckoDriverService +from selenium.webdriver.firefox.webdriver import WebDriver as FirefoxDriver +from selenium.webdriver.remote.webdriver import WebDriver +from selenium.webdriver.safari.options import Options as SafariOptions +from selenium.webdriver.safari.webdriver import WebDriver as SafariDriver +from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.support.wait import WebDriverWait +from 
webdriver_manager.chrome import ChromeDriverManager +from webdriver_manager.firefox import GeckoDriverManager +from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager + +from autogpt.agents.utils.exceptions import CommandExecutionError, TooMuchOutputError +from autogpt.command_decorator import command +from autogpt.core.utils.json_schema import JSONSchema +from autogpt.processing.html import extract_hyperlinks, format_hyperlinks +from autogpt.processing.text import extract_information, summarize_text +from autogpt.url_utils.validators import validate_url + +COMMAND_CATEGORY = "web_browse" +COMMAND_CATEGORY_TITLE = "Web Browsing" + + +if TYPE_CHECKING: + from autogpt.agents.agent import Agent + from autogpt.config import Config + + +logger = logging.getLogger(__name__) + +FILE_DIR = Path(__file__).parent.parent +MAX_RAW_CONTENT_LENGTH = 500 +LINKS_TO_RETURN = 20 + + +class BrowsingError(CommandExecutionError): + """An error occurred while trying to browse the page""" + + +@command( + "read_webpage", + ( + "Read a webpage, and extract specific information from it." + " You must specify either topics_of_interest, a question, or get_raw_content." + ), + { + "url": JSONSchema( + type=JSONSchema.Type.STRING, + description="The URL to visit", + required=True, + ), + "topics_of_interest": JSONSchema( + type=JSONSchema.Type.ARRAY, + items=JSONSchema(type=JSONSchema.Type.STRING), + description=( + "A list of topics about which you want to extract information " + "from the page." + ), + required=False, + ), + "question": JSONSchema( + type=JSONSchema.Type.STRING, + description=( + "A question that you want to answer using the content of the webpage." + ), + required=False, + ), + "get_raw_content": JSONSchema( + type=JSONSchema.Type.BOOLEAN, + description=( + "If true, the unprocessed content of the webpage will be returned. " + "This consumes a lot of tokens, so use it with caution." 
+ ), + required=False, + ), + }, +) +@validate_url +async def read_webpage( + url: str, + agent: Agent, + *, + topics_of_interest: list[str] = [], + get_raw_content: bool = False, + question: str = "", +) -> str: + """Browse a website and return the answer and links to the user + + Args: + url (str): The url of the website to browse + question (str): The question to answer using the content of the webpage + + Returns: + str: The answer and links to the user and the webdriver + """ + driver = None + try: + driver = await open_page_in_browser(url, agent.legacy_config) + + text = scrape_text_with_selenium(driver) + links = scrape_links_with_selenium(driver, url) + + return_literal_content = True + summarized = False + if not text: + return f"Website did not contain any text.\n\nLinks: {links}" + elif get_raw_content: + if ( + output_tokens := agent.llm_provider.count_tokens(text, agent.llm.name) + ) > MAX_RAW_CONTENT_LENGTH: + oversize_factor = round(output_tokens / MAX_RAW_CONTENT_LENGTH, 1) + raise TooMuchOutputError( + f"Page content is {oversize_factor}x the allowed length " + "for `get_raw_content=true`" + ) + return text + (f"\n\nLinks: {links}" if links else "") + else: + text = await summarize_memorize_webpage( + url, text, question or None, topics_of_interest, agent, driver + ) + return_literal_content = bool(question) + summarized = True + + # Limit links to LINKS_TO_RETURN + if len(links) > LINKS_TO_RETURN: + links = links[:LINKS_TO_RETURN] + + text_fmt = f"'''{text}'''" if "\n" in text else f"'{text}'" + links_fmt = "\n".join(f"- {link}" for link in links) + return ( + f"Page content{' (summary)' if summarized else ''}:" + if return_literal_content + else "Answer gathered from webpage:" + ) + f" {text_fmt}\n\nLinks:\n{links_fmt}" + + except WebDriverException as e: + # These errors are often quite long and include lots of context. + # Just grab the first line. 
+ msg = e.msg.split("\n")[0] + if "net::" in msg: + raise BrowsingError( + "A networking error occurred while trying to load the page: %s" + % re.sub(r"^unknown error: ", "", msg) + ) + raise CommandExecutionError(msg) + finally: + if driver: + close_browser(driver) + + +def scrape_text_with_selenium(driver: WebDriver) -> str: + """Scrape text from a browser window using selenium + + Args: + driver (WebDriver): A driver object representing the browser window to scrape + + Returns: + str: the text scraped from the website + """ + + # Get the HTML content directly from the browser's DOM + page_source = driver.execute_script("return document.body.outerHTML;") + soup = BeautifulSoup(page_source, "html.parser") + + for script in soup(["script", "style"]): + script.extract() + + text = soup.get_text() + lines = (line.strip() for line in text.splitlines()) + chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) + text = "\n".join(chunk for chunk in chunks if chunk) + return text + + +def scrape_links_with_selenium(driver: WebDriver, base_url: str) -> list[str]: + """Scrape links from a website using selenium + + Args: + driver (WebDriver): A driver object representing the browser window to scrape + base_url (str): The base URL to use for resolving relative links + + Returns: + List[str]: The links scraped from the website + """ + page_source = driver.page_source + soup = BeautifulSoup(page_source, "html.parser") + + for script in soup(["script", "style"]): + script.extract() + + hyperlinks = extract_hyperlinks(soup, base_url) + + return format_hyperlinks(hyperlinks) + + +async def open_page_in_browser(url: str, config: Config) -> WebDriver: + """Open a browser window and load a web page using Selenium + + Params: + url (str): The URL of the page to load + config (Config): The applicable application configuration + + Returns: + driver (WebDriver): A driver object representing the browser window to scrape + """ + 
logging.getLogger("selenium").setLevel(logging.CRITICAL) + + options_available: dict[str, Type[BrowserOptions]] = { + "chrome": ChromeOptions, + "edge": EdgeOptions, + "firefox": FirefoxOptions, + "safari": SafariOptions, + } + + options: BrowserOptions = options_available[config.selenium_web_browser]() + options.add_argument(f"user-agent={config.user_agent}") + + if isinstance(options, FirefoxOptions): + if config.selenium_headless: + options.headless = True + options.add_argument("--disable-gpu") + driver = FirefoxDriver( + service=GeckoDriverService(GeckoDriverManager().install()), options=options + ) + elif isinstance(options, EdgeOptions): + driver = EdgeDriver( + service=EdgeDriverService(EdgeDriverManager().install()), options=options + ) + elif isinstance(options, SafariOptions): + # Requires a bit more setup on the users end. + # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari # noqa: E501 + driver = SafariDriver(options=options) + elif isinstance(options, ChromeOptions): + if platform == "linux" or platform == "linux2": + options.add_argument("--disable-dev-shm-usage") + options.add_argument("--remote-debugging-port=9222") + + options.add_argument("--no-sandbox") + if config.selenium_headless: + options.add_argument("--headless=new") + options.add_argument("--disable-gpu") + + _sideload_chrome_extensions(options, config.app_data_dir / "assets" / "crx") + + if (chromium_driver_path := Path("/usr/bin/chromedriver")).exists(): + chrome_service = ChromeDriverService(str(chromium_driver_path)) + else: + try: + chrome_driver = ChromeDriverManager().install() + except AttributeError as e: + if "'NoneType' object has no attribute 'split'" in str(e): + # https://github.com/SergeyPirogov/webdriver_manager/issues/649 + logger.critical( + "Connecting to browser failed: is Chrome or Chromium installed?" 
+ ) + raise + chrome_service = ChromeDriverService(chrome_driver) + driver = ChromeDriver(service=chrome_service, options=options) + + driver.get(url) + + # Wait for page to be ready, sleep 2 seconds, wait again until page ready. + # This allows the cookiewall squasher time to get rid of cookie walls. + WebDriverWait(driver, 10).until( + EC.presence_of_element_located((By.TAG_NAME, "body")) + ) + await asyncio.sleep(2) + WebDriverWait(driver, 10).until( + EC.presence_of_element_located((By.TAG_NAME, "body")) + ) + + return driver + + +def _sideload_chrome_extensions(options: ChromeOptions, dl_folder: Path) -> None: + crx_download_url_template = "https://clients2.google.com/service/update2/crx?response=redirect&prodversion=49.0&acceptformat=crx3&x=id%3D{crx_id}%26installsource%3Dondemand%26uc" # noqa + cookiewall_squasher_crx_id = "edibdbjcniadpccecjdfdjjppcpchdlm" + adblocker_crx_id = "cjpalhdlnbpafiamejdnhcphjbkeiagm" + + # Make sure the target folder exists + dl_folder.mkdir(parents=True, exist_ok=True) + + for crx_id in (cookiewall_squasher_crx_id, adblocker_crx_id): + crx_path = dl_folder / f"{crx_id}.crx" + if not crx_path.exists(): + logger.debug(f"Downloading CRX {crx_id}...") + crx_download_url = crx_download_url_template.format(crx_id=crx_id) + urlretrieve(crx_download_url, crx_path) + logger.debug(f"Downloaded {crx_path.name}") + options.add_extension(str(crx_path)) + + +def close_browser(driver: WebDriver) -> None: + """Close the browser + + Args: + driver (WebDriver): The webdriver to close + + Returns: + None + """ + driver.quit() + + +async def summarize_memorize_webpage( + url: str, + text: str, + question: str | None, + topics_of_interest: list[str], + agent: Agent, + driver: Optional[WebDriver] = None, +) -> str: + """Summarize text using the OpenAI API + + Args: + url (str): The url of the text + text (str): The text to summarize + question (str): The question to ask the model + driver (WebDriver): The webdriver to use to scroll the page + + 
Returns: + str: The summary of the text + """ + if not text: + raise ValueError("No text to summarize") + + text_length = len(text) + logger.debug(f"Web page content length: {text_length} characters") + + # memory = get_memory(agent.legacy_config) + + # new_memory = MemoryItem.from_webpage( + # content=text, + # url=url, + # config=agent.legacy_config, + # question=question, + # ) + # memory.add(new_memory) + + result = None + information = None + if topics_of_interest: + information = await extract_information( + text, + topics_of_interest=topics_of_interest, + llm_provider=agent.llm_provider, + config=agent.legacy_config, + ) + return "\n".join(f"* {i}" for i in information) + else: + result, _ = await summarize_text( + text, + question=question, + llm_provider=agent.llm_provider, + config=agent.legacy_config, + ) + return result diff --git a/autogpts/autogpt/autogpt/config/__init__.py b/autogpts/autogpt/autogpt/config/__init__.py new file mode 100644 index 000000000000..e0c113391088 --- /dev/null +++ b/autogpts/autogpt/autogpt/config/__init__.py @@ -0,0 +1,14 @@ +""" +This module contains the configuration classes for AutoGPT. 
+""" +from .ai_directives import AIDirectives +from .ai_profile import AIProfile +from .config import Config, ConfigBuilder, assert_config_has_openai_api_key + +__all__ = [ + "assert_config_has_openai_api_key", + "AIProfile", + "AIDirectives", + "Config", + "ConfigBuilder", +] diff --git a/autogpts/autogpt/autogpt/config/ai_directives.py b/autogpts/autogpt/autogpt/config/ai_directives.py new file mode 100644 index 000000000000..5e2957ef36a7 --- /dev/null +++ b/autogpts/autogpt/autogpt/config/ai_directives.py @@ -0,0 +1,48 @@ +import logging +from pathlib import Path + +import yaml +from pydantic import BaseModel, Field + +from autogpt.logs.helpers import request_user_double_check +from autogpt.utils import validate_yaml_file + +logger = logging.getLogger(__name__) + + +class AIDirectives(BaseModel): + """An object that contains the basic directives for the AI prompt. + + Attributes: + constraints (list): A list of constraints that the AI should adhere to. + resources (list): A list of resources that the AI can utilize. + best_practices (list): A list of best practices that the AI should follow. 
+ """ + + resources: list[str] = Field(default_factory=list) + constraints: list[str] = Field(default_factory=list) + best_practices: list[str] = Field(default_factory=list) + + @staticmethod + def from_file(prompt_settings_file: Path) -> "AIDirectives": + (validated, message) = validate_yaml_file(prompt_settings_file) + if not validated: + logger.error(message, extra={"title": "FAILED FILE VALIDATION"}) + request_user_double_check() + raise RuntimeError(f"File validation failed: {message}") + + with open(prompt_settings_file, encoding="utf-8") as file: + config_params = yaml.load(file, Loader=yaml.SafeLoader) + + return AIDirectives( + constraints=config_params.get("constraints", []), + resources=config_params.get("resources", []), + best_practices=config_params.get("best_practices", []), + ) + + def __add__(self, other: "AIDirectives") -> "AIDirectives": + return AIDirectives( + resources=self.resources + other.resources, + constraints=self.constraints + other.constraints, + best_practices=self.best_practices + other.best_practices, + ).copy(deep=True) diff --git a/autogpts/autogpt/autogpt/config/ai_profile.py b/autogpts/autogpt/autogpt/config/ai_profile.py new file mode 100644 index 000000000000..3f0043c79861 --- /dev/null +++ b/autogpts/autogpt/autogpt/config/ai_profile.py @@ -0,0 +1,68 @@ +from pathlib import Path + +import yaml +from pydantic import BaseModel, Field + + +class AIProfile(BaseModel): + """ + Object to hold the AI's personality. + + Attributes: + ai_name (str): The name of the AI. + ai_role (str): The description of the AI's role. + ai_goals (list): The list of objectives the AI is supposed to complete. 
+ api_budget (float): The maximum dollar value for API calls (0.0 means infinite) + """ + + ai_name: str = "" + ai_role: str = "" + ai_goals: list[str] = Field(default_factory=list[str]) + api_budget: float = 0.0 + + @staticmethod + def load(ai_settings_file: str | Path) -> "AIProfile": + """ + Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget) + loaded from yaml file if it exists, else returns class with no parameters. + + Parameters: + ai_settings_file (Path): The path to the config yaml file. + + Returns: + cls (object): An instance of given cls object + """ + + try: + with open(ai_settings_file, encoding="utf-8") as file: + config_params = yaml.load(file, Loader=yaml.SafeLoader) or {} + except FileNotFoundError: + config_params = {} + + ai_name = config_params.get("ai_name", "") + ai_role = config_params.get("ai_role", "") + ai_goals = [ + str(goal).strip("{}").replace("'", "").replace('"', "") + if isinstance(goal, dict) + else str(goal) + for goal in config_params.get("ai_goals", []) + ] + api_budget = config_params.get("api_budget", 0.0) + + return AIProfile( + ai_name=ai_name, ai_role=ai_role, ai_goals=ai_goals, api_budget=api_budget + ) + + def save(self, ai_settings_file: str | Path) -> None: + """ + Saves the class parameters to the specified file yaml file path as a yaml file. + + Parameters: + ai_settings_file (Path): The path to the config yaml file. 
+ + Returns: + None + """ + + with open(ai_settings_file, "w", encoding="utf-8") as file: + yaml.dump(self.dict(), file, allow_unicode=True) diff --git a/autogpts/autogpt/autogpt/config/config.py b/autogpts/autogpt/autogpt/config/config.py new file mode 100644 index 000000000000..6d932abf01f8 --- /dev/null +++ b/autogpts/autogpt/autogpt/config/config.py @@ -0,0 +1,343 @@ +"""Configuration class to store the state of bools for different scripts access.""" +from __future__ import annotations + +import logging +import os +import re +from pathlib import Path +from typing import Any, Optional, Union + +from auto_gpt_plugin_template import AutoGPTPluginTemplate +from colorama import Fore +from pydantic import Field, SecretStr, validator + +import autogpt +from autogpt.app.utils import clean_input +from autogpt.core.configuration.schema import ( + Configurable, + SystemSettings, + UserConfigurable, +) +from autogpt.core.resource.model_providers.openai import ( + OPEN_AI_CHAT_MODELS, + OpenAICredentials, + OpenAIModelName, +) +from autogpt.file_storage import FileStorageBackendName +from autogpt.logs.config import LoggingConfig +from autogpt.plugins.plugins_config import PluginsConfig +from autogpt.speech import TTSConfig + +logger = logging.getLogger(__name__) + +PROJECT_ROOT = Path(autogpt.__file__).parent.parent +AI_SETTINGS_FILE = Path("ai_settings.yaml") +AZURE_CONFIG_FILE = Path("azure.yaml") +PLUGINS_CONFIG_FILE = Path("plugins_config.yaml") +PROMPT_SETTINGS_FILE = Path("prompt_settings.yaml") + +GPT_4_MODEL = OpenAIModelName.GPT4 +GPT_3_MODEL = OpenAIModelName.GPT3 + + +class Config(SystemSettings, arbitrary_types_allowed=True): + name: str = "Auto-GPT configuration" + description: str = "Default configuration for the Auto-GPT application." 
+ + ######################## + # Application Settings # + ######################## + project_root: Path = PROJECT_ROOT + app_data_dir: Path = project_root / "data" + skip_news: bool = False + skip_reprompt: bool = False + authorise_key: str = UserConfigurable(default="y", from_env="AUTHORISE_COMMAND_KEY") + exit_key: str = UserConfigurable(default="n", from_env="EXIT_KEY") + noninteractive_mode: bool = False + chat_messages_enabled: bool = UserConfigurable( + default=True, from_env=lambda: os.getenv("CHAT_MESSAGES_ENABLED") == "True" + ) + + # TTS configuration + logging: LoggingConfig = LoggingConfig() + tts_config: TTSConfig = TTSConfig() + + # File storage + file_storage_backend: FileStorageBackendName = UserConfigurable( + default=FileStorageBackendName.LOCAL, from_env="FILE_STORAGE_BACKEND" + ) + + ########################## + # Agent Control Settings # + ########################## + # Paths + ai_settings_file: Path = UserConfigurable( + default=AI_SETTINGS_FILE, from_env="AI_SETTINGS_FILE" + ) + prompt_settings_file: Path = UserConfigurable( + default=PROMPT_SETTINGS_FILE, + from_env="PROMPT_SETTINGS_FILE", + ) + + # Model configuration + fast_llm: OpenAIModelName = UserConfigurable( + default=OpenAIModelName.GPT3, + from_env="FAST_LLM", + ) + smart_llm: OpenAIModelName = UserConfigurable( + default=OpenAIModelName.GPT4_TURBO, + from_env="SMART_LLM", + ) + temperature: float = UserConfigurable(default=0, from_env="TEMPERATURE") + openai_functions: bool = UserConfigurable( + default=False, from_env=lambda: os.getenv("OPENAI_FUNCTIONS", "False") == "True" + ) + embedding_model: str = UserConfigurable( + default="text-embedding-3-small", from_env="EMBEDDING_MODEL" + ) + browse_spacy_language_model: str = UserConfigurable( + default="en_core_web_sm", from_env="BROWSE_SPACY_LANGUAGE_MODEL" + ) + + # Run loop configuration + continuous_mode: bool = False + continuous_limit: int = 0 + + ########## + # Memory # + ########## + memory_backend: str = 
UserConfigurable("json_file", from_env="MEMORY_BACKEND") + memory_index: str = UserConfigurable("auto-gpt-memory", from_env="MEMORY_INDEX") + redis_host: str = UserConfigurable("localhost", from_env="REDIS_HOST") + redis_port: int = UserConfigurable(default=6379, from_env="REDIS_PORT") + redis_password: str = UserConfigurable("", from_env="REDIS_PASSWORD") + wipe_redis_on_start: bool = UserConfigurable( + default=True, + from_env=lambda: os.getenv("WIPE_REDIS_ON_START", "True") == "True", + ) + + ############ + # Commands # + ############ + # General + disabled_command_categories: list[str] = UserConfigurable( + default_factory=list, + from_env=lambda: _safe_split(os.getenv("DISABLED_COMMAND_CATEGORIES")), + ) + + # File ops + restrict_to_workspace: bool = UserConfigurable( + default=True, + from_env=lambda: os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True", + ) + allow_downloads: bool = False + + # Shell commands + shell_command_control: str = UserConfigurable( + default="denylist", from_env="SHELL_COMMAND_CONTROL" + ) + execute_local_commands: bool = UserConfigurable( + default=False, + from_env=lambda: os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True", + ) + shell_denylist: list[str] = UserConfigurable( + default_factory=lambda: ["sudo", "su"], + from_env=lambda: _safe_split( + os.getenv("SHELL_DENYLIST", os.getenv("DENY_COMMANDS")) + ), + ) + shell_allowlist: list[str] = UserConfigurable( + default_factory=list, + from_env=lambda: _safe_split( + os.getenv("SHELL_ALLOWLIST", os.getenv("ALLOW_COMMANDS")) + ), + ) + + # Text to image + image_provider: Optional[str] = UserConfigurable(from_env="IMAGE_PROVIDER") + huggingface_image_model: str = UserConfigurable( + default="CompVis/stable-diffusion-v1-4", from_env="HUGGINGFACE_IMAGE_MODEL" + ) + sd_webui_url: Optional[str] = UserConfigurable( + default="http://localhost:7860", from_env="SD_WEBUI_URL" + ) + image_size: int = UserConfigurable(default=256, from_env="IMAGE_SIZE") + + # Audio to text + 
audio_to_text_provider: str = UserConfigurable( + default="huggingface", from_env="AUDIO_TO_TEXT_PROVIDER" + ) + huggingface_audio_to_text_model: Optional[str] = UserConfigurable( + from_env="HUGGINGFACE_AUDIO_TO_TEXT_MODEL" + ) + + # Web browsing + selenium_web_browser: str = UserConfigurable("chrome", from_env="USE_WEB_BROWSER") + selenium_headless: bool = UserConfigurable( + default=True, from_env=lambda: os.getenv("HEADLESS_BROWSER", "True") == "True" + ) + user_agent: str = UserConfigurable( + default="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36", # noqa: E501 + from_env="USER_AGENT", + ) + + ################### + # Plugin Settings # + ################### + plugins_dir: str = UserConfigurable("plugins", from_env="PLUGINS_DIR") + plugins_config_file: Path = UserConfigurable( + default=PLUGINS_CONFIG_FILE, from_env="PLUGINS_CONFIG_FILE" + ) + plugins_config: PluginsConfig = Field( + default_factory=lambda: PluginsConfig(plugins={}) + ) + plugins: list[AutoGPTPluginTemplate] = Field(default_factory=list, exclude=True) + plugins_allowlist: list[str] = UserConfigurable( + default_factory=list, + from_env=lambda: _safe_split(os.getenv("ALLOWLISTED_PLUGINS")), + ) + plugins_denylist: list[str] = UserConfigurable( + default_factory=list, + from_env=lambda: _safe_split(os.getenv("DENYLISTED_PLUGINS")), + ) + plugins_openai: list[str] = UserConfigurable( + default_factory=list, from_env=lambda: _safe_split(os.getenv("OPENAI_PLUGINS")) + ) + + ############### + # Credentials # + ############### + # OpenAI + openai_credentials: Optional[OpenAICredentials] = None + azure_config_file: Optional[Path] = UserConfigurable( + default=AZURE_CONFIG_FILE, from_env="AZURE_CONFIG_FILE" + ) + + # Github + github_api_key: Optional[str] = UserConfigurable(from_env="GITHUB_API_KEY") + github_username: Optional[str] = UserConfigurable(from_env="GITHUB_USERNAME") + + # Google + google_api_key: Optional[str] = 
UserConfigurable(from_env="GOOGLE_API_KEY") + google_custom_search_engine_id: Optional[str] = UserConfigurable( + from_env="GOOGLE_CUSTOM_SEARCH_ENGINE_ID", + ) + + # Huggingface + huggingface_api_token: Optional[str] = UserConfigurable( + from_env="HUGGINGFACE_API_TOKEN" + ) + + # Stable Diffusion + sd_webui_auth: Optional[str] = UserConfigurable(from_env="SD_WEBUI_AUTH") + + @validator("plugins", each_item=True) + def validate_plugins(cls, p: AutoGPTPluginTemplate | Any): + assert issubclass( + p.__class__, AutoGPTPluginTemplate + ), f"{p} does not subclass AutoGPTPluginTemplate" + assert ( + p.__class__.__name__ != "AutoGPTPluginTemplate" + ), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance" + return p + + @validator("openai_functions") + def validate_openai_functions(cls, v: bool, values: dict[str, Any]): + if v: + smart_llm = values["smart_llm"] + assert OPEN_AI_CHAT_MODELS[smart_llm].has_function_call_api, ( + f"Model {smart_llm} does not support OpenAI Functions. " + "Please disable OPENAI_FUNCTIONS or choose a suitable model." 
+ ) + return v + + +class ConfigBuilder(Configurable[Config]): + default_settings = Config() + + @classmethod + def build_config_from_env(cls, project_root: Path = PROJECT_ROOT) -> Config: + """Initialize the Config class""" + + config = cls.build_agent_configuration() + config.project_root = project_root + + # Make relative paths absolute + for k in { + "ai_settings_file", # TODO: deprecate or repurpose + "prompt_settings_file", # TODO: deprecate or repurpose + "plugins_config_file", # TODO: move from project root + "azure_config_file", # TODO: move from project root + }: + setattr(config, k, project_root / getattr(config, k)) + + if ( + config.openai_credentials + and config.openai_credentials.api_type == "azure" + and (config_file := config.azure_config_file) + ): + config.openai_credentials.load_azure_config(config_file) + + config.plugins_config = PluginsConfig.load_config( + config.plugins_config_file, + config.plugins_denylist, + config.plugins_allowlist, + ) + + return config + + +def assert_config_has_openai_api_key(config: Config) -> None: + """Check if the OpenAI API key is set in config.py or as an environment variable.""" + key_pattern = r"^sk-(proj-)?\w{48}" + openai_api_key = ( + config.openai_credentials.api_key.get_secret_value() + if config.openai_credentials + else "" + ) + + # If there's no credentials or empty API key, prompt the user to set it + if not openai_api_key: + logger.error( + "Please set your OpenAI API key in .env or as an environment variable." 
+ ) + logger.info( + "You can get your key from https://platform.openai.com/account/api-keys" + ) + openai_api_key = clean_input( + config, "Please enter your OpenAI API key if you have it:" + ) + openai_api_key = openai_api_key.strip() + if re.search(key_pattern, openai_api_key): + os.environ["OPENAI_API_KEY"] = openai_api_key + if config.openai_credentials: + config.openai_credentials.api_key = SecretStr(openai_api_key) + else: + config.openai_credentials = OpenAICredentials( + api_key=SecretStr(openai_api_key) + ) + print("OpenAI API key successfully set!") + print( + f"{Fore.YELLOW}NOTE: The API key you've set is only temporary. " + f"For longer sessions, please set it in the .env file{Fore.RESET}" + ) + else: + print(f"{Fore.RED}Invalid OpenAI API key{Fore.RESET}") + exit(1) + # If key is set, but it looks invalid + elif not re.search(key_pattern, openai_api_key): + logger.error( + "Invalid OpenAI API key! " + "Please set your OpenAI API key in .env or as an environment variable." + ) + logger.info( + "You can get your key from https://platform.openai.com/account/api-keys" + ) + exit(1) + + +def _safe_split(s: Union[str, None], sep: str = ",") -> list[str]: + """Split a string by a separator. Return an empty list if the string is None.""" + if s is None: + return [] + return s.split(sep) diff --git a/autogpts/autogpt/autogpt/core/ARCHITECTURE_NOTES.md b/autogpts/autogpt/autogpt/core/ARCHITECTURE_NOTES.md new file mode 100644 index 000000000000..af6aac7b78d7 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/ARCHITECTURE_NOTES.md @@ -0,0 +1,271 @@ +# Re-architecture Notes + +## Key Documents + +- [Planned Agent Workflow](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ) +- [Original Architecture Diagram](https://www.figma.com/file/fwdj44tPR7ArYtnGGUKknw/Modular-Architecture?type=whiteboard&node-id=0-1) - This is sadly well out of date at this point. 
+- [Kanban](https://github.com/orgs/Significant-Gravitas/projects/1/views/1?filterQuery=label%3Are-arch)
+
+## The Motivation
+
+The `master` branch of AutoGPT is an organically grown amalgamation of many thoughts
+and ideas about agent-driven autonomous systems. It lacks clear abstraction boundaries,
+has issues of global state and poorly encapsulated state, and is generally just hard to
+make effective changes to. Mainly it's just a system that's hard to make changes to.
+And research in the field is moving fast, so we want to be able to try new ideas
+quickly.
+
+## Initial Planning
+
+A large group of maintainers and contributors met to discuss the architectural
+challenges associated with the existing codebase. Many much-desired features (building
+new user interfaces, enabling project-specific agents, enabling multi-agent systems)
+are bottlenecked by the global state in the system. We discussed the tradeoffs between
+an incremental system transition and a big breaking version change and decided to go
+for the breaking version change. We justified this by saying:
+
+- We can maintain, in essence, the same user experience as now even with a radical
+  restructuring of the codebase
+- Our developer audience is struggling to use the existing codebase to build
+  applications and libraries of their own, so this breaking change will largely be
+  welcome.
+
+## Primary Goals
+
+- Separate the AutoGPT application code from the library code.
+- Remove global state from the system
+- Allow for multiple agents per user (with facilities for running simultaneously)
+- Create a serializable representation of an Agent
+- Encapsulate the core systems in abstractions with clear boundaries.
+
+## Secondary goals
+
+- Use existing tools to ditch any unnecessary cruft in the codebase (document loading,
+  json parsing, anything easier to replace than to port).
+- Bring in the [core agent loop updates](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ) + being developed simultaneously by @Pwuts + +# The Agent Subsystems + +## Configuration + +We want a lot of things from a configuration system. We lean heavily on it in the +`master` branch to allow several parts of the system to communicate with each other. +[Recent work](https://github.com/Significant-Gravitas/AutoGPT/pull/4737) has made it +so that the config is no longer a singleton object that is materialized from the import +state, but it's still treated as a +[god object](https://en.wikipedia.org/wiki/God_object) containing all information about +the system and _critically_ allowing any system to reference configuration information +about other parts of the system. + +### What we want + +- It should still be reasonable to collate the entire system configuration in a + sensible way. +- The configuration should be validatable and validated. +- The system configuration should be a _serializable_ representation of an `Agent`. +- The configuration system should provide a clear (albeit very low-level) contract + about user-configurable aspects of the system. +- The configuration should reasonably manage default values and user-provided overrides. +- The configuration system needs to handle credentials in a reasonable way. +- The configuration should be the representation of some amount of system state, like + api budgets and resource usage. These aspects are recorded in the configuration and + updated by the system itself. +- Agent systems should have encapsulated views of the configuration. E.g. the memory + system should know about memory configuration but nothing about command configuration. + +## Workspace + +There are two ways to think about the workspace: + +- The workspace is a scratch space for an agent where it can store files, write code, + and do pretty much whatever else it likes. 
+- The workspace is, at any given point in time, the single source of truth for what an + agent is. It contains the serializable state (the configuration) as well as all + other working state (stored files, databases, memories, custom code). + +In the existing system there is **one** workspace. And because the workspace holds so +much agent state, that means a user can only work with one agent at a time. + +## Memory + +The memory system has been under extremely active development. +See [#3536](https://github.com/Significant-Gravitas/AutoGPT/issues/3536) and +[#4208](https://github.com/Significant-Gravitas/AutoGPT/pull/4208) for discussion and +work in the `master` branch. The TL;DR is +that we noticed a couple of months ago that the `Agent` performed **worse** with +permanent memory than without it. Since then the knowledge storage and retrieval +system has been [redesigned](https://whimsical.com/memory-system-8Ae6x6QkjDwQAUe9eVJ6w1) +and partially implemented in the `master` branch. + +## Planning/Prompt-Engineering + +The planning system is the system that translates user desires/agent intentions into +language model prompts. In the course of development, it has become pretty clear +that `Planning` is the wrong name for this system + +### What we want + +- It should be incredibly obvious what's being passed to a language model, when it's + being passed, and what the language model response is. The landscape of language + model research is developing very rapidly, so building complex abstractions between + users/contributors and the language model interactions is going to make it very + difficult for us to nimbly respond to new research developments. +- Prompt-engineering should ideally be exposed in a parameterizeable way to users. 
+- We should, where possible, leverage OpenAI's new + [function calling api](https://openai.com/blog/function-calling-and-other-api-updates) + to get outputs in a standard machine-readable format and avoid the deep pit of + parsing json (and fixing unparsable json). + +### Planning Strategies + +The [new agent workflow](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ) +has many, many interaction points for language models. We really would like to not +distribute prompt templates and raw strings all through the system. The re-arch solution +is to encapsulate language model interactions into planning strategies. +These strategies are defined by + +- The `LanguageModelClassification` they use (`FAST` or `SMART`) +- A function `build_prompt` that takes strategy specific arguments and constructs a + `LanguageModelPrompt` (a simple container for lists of messages and functions to + pass to the language model) +- A function `parse_content` that parses the response content (a dict) into a better + formatted dict. Contracts here are intentionally loose and will tighten once we have + at least one other language model provider. + +## Resources + +Resources are kinds of services we consume from external APIs. They may have associated +credentials and costs we need to manage. Management of those credentials is implemented +as manipulation of the resource configuration. We have two categories of resources +currently + +- AI/ML model providers (including language model providers and embedding model providers, ie OpenAI) +- Memory providers (e.g. Pinecone, Weaviate, ChromaDB, etc.) + +### What we want + +- Resource abstractions should provide a common interface to different service providers + for a particular kind of service. +- Resource abstractions should manipulate the configuration to manage their credentials + and budget/accounting. +- Resource abstractions should be composable over an API (e.g. 
I should be able to make
+  an OpenAI provider that is both a LanguageModelProvider and an EmbeddingModelProvider
+  and use it wherever I need those services).
+
+## Abilities
+
+Along with planning and memory usage, abilities are one of the major augmentations of
+augmented language models. They allow us to expand the scope of what language models
+can do by hooking them up to code they can execute to obtain new knowledge or influence
+the world.
+
+### What we want
+
+- Abilities should have an extremely clear interface that users can write to.
+- Abilities should have an extremely clear interface that a language model can
+  understand
+- Abilities should be declarative about their dependencies so the system can inject them
+- Abilities should be executable (where sensible) in an async run loop.
+- Abilities should not have side effects unless those side effects are clear in
+  their representation to an agent (e.g. the BrowseWeb ability shouldn't write a file,
+  but the WriteFile ability can).
+
+## Plugins
+
+Users want to add lots of features that we don't want to support as first-party.
+Our solution to this is a plugin system to allow users to plug in their functionality or
+to construct their agent from a public plugin marketplace. Our primary concern in the
+re-arch is to build a stateless plugin service interface and a simple implementation
+that can load plugins from installed packages or from zip files. Future efforts will
+expand this system to allow plugins to load from a marketplace or some other kind
+of service.
+
+### What is a Plugin
+
+Plugins are a kind of garbage term. They refer to a number of things.
+
+- New commands for the agent to execute. This is the most common usage.
+- Replacements for entire subsystems like memory or language model providers
+- Application plugins that do things like send emails or communicate via whatsapp
+- The repositories contributors create that may themselves have multiple plugins in them.
+
+### Usage in the existing system
+
+The current plugin system is _hook-based_. This means plugins don't correspond to
+kinds of objects in the system, but rather to times in the system at which we defer
+execution to them. The main advantage of this setup is that user code can hijack
+pretty much any behavior of the agent by injecting code that supersedes the normal
+agent execution. The disadvantages to this approach are numerous:
+
+- We have absolutely no mechanisms to enforce any security measures because the threat
+  surface is everything.
+- We cannot reason about agent behavior in a cohesive way because control flow can be
+  ceded to user code at pretty much any point and arbitrarily change or break the
+  agent behavior
+- The interface for designing a plugin is kind of terrible and difficult to standardize
+- The hook based implementation means we couple ourselves to a particular flow of
+  control (or otherwise risk breaking plugin behavior). E.g. many of the hook targets
+  in the [old workflow](https://whimsical.com/agent-workflow-VAzeKcup3SR7awpNZJKTyK)
+  are not present or mean something entirely different in the
+  [new workflow](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ).
+- Etc.
+
+### What we want
+
+- A concrete definition of a plugin that is narrow enough in scope that we can define
+  it well and reason about how it will work in the system.
+- A set of abstractions that let us define a plugin by its storage format and location
+- A service interface that knows how to parse the plugin abstractions and turn them
+  into concrete classes and objects.
+
+
+## Some Notes on how and why we'll use OO in this project
+
+First and foremost, Python itself is an object-oriented language. Its
+underlying [data model](https://docs.python.org/3/reference/datamodel.html) is built
+with object-oriented programming in mind.
It offers useful tools like abstract base +classes to communicate interfaces to developers who want to, e.g., write plugins, or +help work on implementations. If we were working in a different language that offered +different tools, we'd use a different paradigm. + +While many things are classes in the re-arch, they are not classes in the same way. +There are three kinds of things (roughly) that are written as classes in the re-arch: +1. **Configuration**: AutoGPT has *a lot* of configuration. This configuration + is *data* and we use **[Pydantic](https://docs.pydantic.dev/latest/)** to manage it as + pydantic is basically industry standard for this stuff. It provides runtime validation + for all the configuration and allows us to easily serialize configuration to both basic + python types (dicts, lists, and primitives) as well as serialize to json, which is + important for us being able to put representations of agents + [on the wire](https://en.wikipedia.org/wiki/Wire_protocol) for web applications and + agent-to-agent communication. *These are essentially + [structs](https://en.wikipedia.org/wiki/Struct_(C_programming_language)) rather than + traditional classes.* +2. **Internal Data**: Very similar to configuration, AutoGPT passes around boatloads + of internal data. We are interacting with language models and language model APIs + which means we are handling lots of *structured* but *raw* text. Here we also + leverage **pydantic** to both *parse* and *validate* the internal data and also to + give us concrete types which we can use static type checkers to validate against + and discover problems before they show up as bugs at runtime. *These are + essentially [structs](https://en.wikipedia.org/wiki/Struct_(C_programming_language)) + rather than traditional classes.* +3. **System Interfaces**: This is our primary traditional use of classes in the + re-arch. We have a bunch of systems. We want many of those systems to have + alternative implementations (e.g. 
via plugins). We use abstract base classes to + define interfaces to communicate with people who might want to provide those + plugins. We provide a single concrete implementation of most of those systems as a + subclass of the interface. This should not be controversial. + +The approach is consistent with +[prior](https://github.com/Significant-Gravitas/AutoGPT/issues/2458) +[work](https://github.com/Significant-Gravitas/AutoGPT/pull/2442) done by other +maintainers in this direction. + +From an organization standpoint, OO programming is by far the most popular programming +paradigm (especially for Python). It's the one most often taught in programming classes +and the one with the most available online training for people interested in +contributing. + +Finally, and importantly, we scoped the plan and initial design of the re-arch as a +large group of maintainers and collaborators early on. This is consistent with the +design we chose and no-one offered alternatives. diff --git a/autogpts/autogpt/autogpt/core/README.md b/autogpts/autogpt/autogpt/core/README.md new file mode 100644 index 000000000000..ff97e2c59f51 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/README.md @@ -0,0 +1,92 @@ +# AutoGPT Core + +This subpackage contains the ongoing work for the +[AutoGPT Re-arch](https://github.com/Significant-Gravitas/AutoGPT/issues/4770). It is +a work in progress and is not yet feature complete. In particular, it does not yet +have many of the AutoGPT commands implemented and is pending ongoing work to +[re-incorporate vector-based memory and knowledge retrieval](https://github.com/Significant-Gravitas/AutoGPT/issues/3536). + +## [Overview](ARCHITECTURE_NOTES.md) + +The AutoGPT Re-arch is a re-implementation of the AutoGPT agent that is designed to be more modular, +more extensible, and more maintainable than the original AutoGPT agent. It is also designed to be +more accessible to new developers and to be easier to contribute to. 
The re-arch is a work in progress +and is not yet feature complete. It is also not yet ready for production use. + +## Running the Re-arch Code + +1. Open the `autogpt/core` folder in a terminal + +2. Set up a dedicated virtual environment: + `python -m venv .venv` + +3. Install everything needed to run the project: + `poetry install` + + +## CLI Application + +There are two client applications for AutoGPT included. + +:star2: **This is the reference application I'm working with for now** :star2: + +The first app is a straight CLI application. I have not done anything yet to port all the friendly display stuff from the ~~`logger.typewriter_log`~~`user_friendly_output` logic. + +- [Entry Point](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/autogpt/autogpt/core/runner/cli_app/cli.py) +- [Client Application](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/autogpt/autogpt/core/runner/cli_app/main.py) + +You'll then need a settings file. Run + +``` +poetry run cli make-settings +``` + +This will write a file called `default_agent_settings.yaml` with all the user-modifiable +configuration keys to `~/auto-gpt/default_agent_settings.yml` and make the `auto-gpt` directory +in your user directory if it doesn't exist). Your user directory is located in different places +depending on your operating system: + +- On Linux, it's `/home/USERNAME` +- On Windows, it's `C:\Users\USERNAME` +- On Mac, it's `/Users/USERNAME` + +At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run +the model. + +You can then run AutoGPT with + +``` +poetry run cli run +``` + +to launch the interaction loop. + +### CLI Web App + +:warning: I am not actively developing this application. I am primarily working with the traditional CLI app +described above. It is a very good place to get involved if you have web application design experience and are +looking to get involved in the re-arch. 
+ +The second app is still a CLI, but it sets up a local webserver that the client application talks to +rather than invoking calls to the Agent library code directly. This application is essentially a sketch +at this point as the folks who were driving it have had less time (and likely not enough clarity) to proceed. + +- [Entry Point](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/autogpt/autogpt/core/runner/cli_web_app/cli.py) +- [Client Application](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/autogpt/autogpt/core/runner/cli_web_app/client/client.py) +- [Server API](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/autogpt/autogpt/core/runner/cli_web_app/server/api.py) + +To run, you still need to generate a default configuration. You can do + +``` +poetry run cli-web make-settings +``` + +It invokes the same command as the bare CLI app, so follow the instructions above about setting your API key. + +To run, do + +``` +poetry run cli-web client +``` + +This will launch a webserver and then start the client cli application to communicate with it. 
diff --git a/autogpts/autogpt/autogpt/core/__init__.py b/autogpts/autogpt/autogpt/core/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/autogpt/core/ability/__init__.py b/autogpts/autogpt/autogpt/core/ability/__init__.py new file mode 100644 index 000000000000..3709b9277a15 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/ability/__init__.py @@ -0,0 +1,18 @@ +"""The command system provides a way to extend the functionality of the AI agent.""" +from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry +from autogpt.core.ability.schema import AbilityResult +from autogpt.core.ability.simple import ( + AbilityRegistryConfiguration, + AbilityRegistrySettings, + SimpleAbilityRegistry, +) + +__all__ = [ + "Ability", + "AbilityConfiguration", + "AbilityRegistry", + "AbilityResult", + "AbilityRegistryConfiguration", + "AbilityRegistrySettings", + "SimpleAbilityRegistry", +] diff --git a/autogpts/autogpt/autogpt/core/ability/base.py b/autogpts/autogpt/autogpt/core/ability/base.py new file mode 100644 index 000000000000..2686c101cfc1 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/ability/base.py @@ -0,0 +1,88 @@ +import abc +from pprint import pformat +from typing import Any, ClassVar + +import inflection +from pydantic import Field + +from autogpt.core.configuration import SystemConfiguration +from autogpt.core.planning.simple import LanguageModelConfiguration +from autogpt.core.plugin.base import PluginLocation +from autogpt.core.resource.model_providers import CompletionModelFunction +from autogpt.core.utils.json_schema import JSONSchema + +from .schema import AbilityResult + + +class AbilityConfiguration(SystemConfiguration): + """Struct for model configuration.""" + + location: PluginLocation + packages_required: list[str] = Field(default_factory=list) + language_model_required: LanguageModelConfiguration = None + memory_provider_required: bool = False + workspace_required: bool = False + + 
+class Ability(abc.ABC): + """A class representing an agent ability.""" + + default_configuration: ClassVar[AbilityConfiguration] + + @classmethod + def name(cls) -> str: + """The name of the ability.""" + return inflection.underscore(cls.__name__) + + @property + @classmethod + @abc.abstractmethod + def description(cls) -> str: + """A detailed description of what the ability does.""" + ... + + @property + @classmethod + @abc.abstractmethod + def parameters(cls) -> dict[str, JSONSchema]: + ... + + @abc.abstractmethod + async def __call__(self, *args: Any, **kwargs: Any) -> AbilityResult: + ... + + def __str__(self) -> str: + return pformat(self.spec) + + @property + @classmethod + def spec(cls) -> CompletionModelFunction: + return CompletionModelFunction( + name=cls.name(), + description=cls.description, + parameters=cls.parameters, + ) + + +class AbilityRegistry(abc.ABC): + @abc.abstractmethod + def register_ability( + self, ability_name: str, ability_configuration: AbilityConfiguration + ) -> None: + ... + + @abc.abstractmethod + def list_abilities(self) -> list[str]: + ... + + @abc.abstractmethod + def dump_abilities(self) -> list[CompletionModelFunction]: + ... + + @abc.abstractmethod + def get_ability(self, ability_name: str) -> Ability: + ... + + @abc.abstractmethod + async def perform(self, ability_name: str, **kwargs: Any) -> AbilityResult: + ... 
diff --git a/autogpts/autogpt/autogpt/core/ability/builtins/__init__.py b/autogpts/autogpt/autogpt/core/ability/builtins/__init__.py new file mode 100644 index 000000000000..93936dbc68be --- /dev/null +++ b/autogpts/autogpt/autogpt/core/ability/builtins/__init__.py @@ -0,0 +1,12 @@ +from autogpt.core.ability.builtins.create_new_ability import CreateNewAbility +from autogpt.core.ability.builtins.query_language_model import QueryLanguageModel + +BUILTIN_ABILITIES = { + QueryLanguageModel.name(): QueryLanguageModel, +} + +__all__ = [ + "BUILTIN_ABILITIES", + "CreateNewAbility", + "QueryLanguageModel", +] diff --git a/autogpts/autogpt/autogpt/core/ability/builtins/create_new_ability.py b/autogpts/autogpt/autogpt/core/ability/builtins/create_new_ability.py new file mode 100644 index 000000000000..55550cafc6d8 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/ability/builtins/create_new_ability.py @@ -0,0 +1,107 @@ +import logging +from typing import ClassVar + +from autogpt.core.ability.base import Ability, AbilityConfiguration +from autogpt.core.ability.schema import AbilityResult +from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat +from autogpt.core.utils.json_schema import JSONSchema + + +class CreateNewAbility(Ability): + default_configuration = AbilityConfiguration( + location=PluginLocation( + storage_format=PluginStorageFormat.INSTALLED_PACKAGE, + storage_route="autogpt.core.ability.builtins.CreateNewAbility", + ), + ) + + def __init__( + self, + logger: logging.Logger, + configuration: AbilityConfiguration, + ): + self._logger = logger + self._configuration = configuration + + description: ClassVar[str] = "Create a new ability by writing python code." 
+ + parameters: ClassVar[dict[str, JSONSchema]] = { + "ability_name": JSONSchema( + description="A meaningful and concise name for the new ability.", + type=JSONSchema.Type.STRING, + required=True, + ), + "description": JSONSchema( + description=( + "A detailed description of the ability and its uses, " + "including any limitations." + ), + type=JSONSchema.Type.STRING, + required=True, + ), + "arguments": JSONSchema( + description="A list of arguments that the ability will accept.", + type=JSONSchema.Type.ARRAY, + items=JSONSchema( + type=JSONSchema.Type.OBJECT, + properties={ + "name": JSONSchema( + description="The name of the argument.", + type=JSONSchema.Type.STRING, + ), + "type": JSONSchema( + description=( + "The type of the argument. " + "Must be a standard json schema type." + ), + type=JSONSchema.Type.STRING, + ), + "description": JSONSchema( + description=( + "A detailed description of the argument and its uses." + ), + type=JSONSchema.Type.STRING, + ), + }, + ), + ), + "required_arguments": JSONSchema( + description="A list of the names of the arguments that are required.", + type=JSONSchema.Type.ARRAY, + items=JSONSchema( + description="The names of the arguments that are required.", + type=JSONSchema.Type.STRING, + ), + ), + "package_requirements": JSONSchema( + description=( + "A list of the names of the Python packages that are required to " + "execute the ability." + ), + type=JSONSchema.Type.ARRAY, + items=JSONSchema( + description=( + "The of the Python package that is required to execute the ability." + ), + type=JSONSchema.Type.STRING, + ), + ), + "code": JSONSchema( + description=( + "The Python code that will be executed when the ability is called." 
class ReadFile(Ability):
    """Read a file from the agent workspace and parse all text out of it."""

    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.ReadFile",
        ),
        packages_required=["unstructured"],
        workspace_required=True,
    )

    def __init__(
        self,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._logger = logger
        self._workspace = workspace

    description: ClassVar[str] = "Read and parse all text from a file."

    parameters: ClassVar[dict[str, JSONSchema]] = {
        "filename": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The name of the file to read.",
        ),
    }

    def _check_preconditions(self, filename: str) -> AbilityResult | None:
        """Check that `filename` can be read.

        Returns:
            A failed AbilityResult describing the problem, or None when all
            preconditions hold.
        """
        message = ""
        try:
            # BUG FIX: the original `try` body was `pass`, so the ImportError
            # branch was unreachable and the dependency was never checked.
            # NOTE(review): importing the package named in the message;
            # confirm this is the dependency originally intended.
            import charset_normalizer  # noqa: F401
        except ImportError:
            message = "Package charset_normalizer is not installed."

        try:
            file_path = self._workspace.get_path(filename)
            if not file_path.exists():
                # BUG FIX: these f-strings had no placeholders and always
                # rendered a literal "(unknown)"; name the offending file.
                message = f"File {filename} does not exist."
            elif not file_path.is_file():
                # `elif` so a nonexistent path doesn't also trip this check
                # and overwrite the more precise message above.
                message = f"{filename} is not a file."
        except ValueError as e:
            message = str(e)

        if message:
            return AbilityResult(
                ability_name=self.name(),
                ability_args={"filename": filename},
                success=False,
                message=message,
                data=None,
            )
        return None

    def __call__(self, filename: str) -> AbilityResult:
        """Partition the workspace file into text and return it as Knowledge."""
        if result := self._check_preconditions(filename):
            return result

        from unstructured.partition.auto import partition

        file_path = self._workspace.get_path(filename)
        try:
            elements = partition(str(file_path))
            # TODO: Lots of other potentially useful information is available
            #   in the partitioned file. Consider returning more of it.
            new_knowledge = Knowledge(
                content="\n\n".join([element.text for element in elements]),
                content_type=ContentType.TEXT,
                content_metadata={"filename": filename},
            )
            success = True
            message = f"File {file_path} read successfully."
        except IOError as e:
            new_knowledge = None
            success = False
            message = str(e)

        return AbilityResult(
            ability_name=self.name(),
            ability_args={"filename": filename},
            success=success,
            message=message,
            new_knowledge=new_knowledge,
        )


class WriteFile(Ability):
    """Write text content to a new file inside the agent workspace."""

    default_configuration = AbilityConfiguration(
        location=PluginLocation(
            storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
            storage_route="autogpt.core.ability.builtins.WriteFile",
        ),
        packages_required=["unstructured"],
        workspace_required=True,
    )

    def __init__(
        self,
        logger: logging.Logger,
        workspace: Workspace,
    ):
        self._logger = logger
        self._workspace = workspace

    description: ClassVar[str] = "Write text to a file."

    parameters: ClassVar[dict[str, JSONSchema]] = {
        "filename": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The name of the file to write.",
        ),
        "contents": JSONSchema(
            type=JSONSchema.Type.STRING,
            description="The contents of the file to write.",
        ),
    }

    def _check_preconditions(
        self, filename: str, contents: str
    ) -> AbilityResult | None:
        """Check that `filename` does not exist yet and that there is content.

        Returns:
            A failed AbilityResult describing the problem, or None when all
            preconditions hold.
        """
        message = ""
        try:
            file_path = self._workspace.get_path(filename)
            if file_path.exists():
                message = f"File {filename} already exists."
            elif not contents:
                # BUG FIX: the original tested `if len(contents):`, i.e. it
                # reported "not given any content" exactly when content WAS
                # provided (and accepted empty content).
                message = f"File {filename} was not given any content."
        except ValueError as e:
            message = str(e)

        if message:
            return AbilityResult(
                ability_name=self.name(),
                ability_args={"filename": filename, "contents": contents},
                success=False,
                message=message,
                data=None,
            )
        return None

    def __call__(self, filename: str, contents: str) -> AbilityResult:
        """Write `contents` to `filename`, creating parent directories as needed."""
        if result := self._check_preconditions(filename, contents):
            return result

        file_path = self._workspace.get_path(filename)
        try:
            directory = os.path.dirname(file_path)
            # BUG FIX: makedirs() without exist_ok raised FileExistsError for
            # every file whose parent directory already existed.
            os.makedirs(directory, exist_ok=True)
            # BUG FIX: the original opened `filename` (relative to the process
            # CWD) instead of the resolved workspace path, so writes could
            # land outside the workspace.
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(contents)
            success = True
            message = f"File {file_path} written successfully."
        except IOError as e:
            success = False
            message = str(e)

        return AbilityResult(
            ability_name=self.name(),
            ability_args={"filename": filename},
            success=success,
            message=message,
        )
" + "A query should contain a question and any relevant context." + ), + ) + } + + async def __call__(self, query: str) -> AbilityResult: + model_response = await self._language_model_provider.create_chat_completion( + model_prompt=[ChatMessage.user(query)], + functions=[], + model_name=self._configuration.language_model_required.model_name, + ) + return AbilityResult( + ability_name=self.name(), + ability_args={"query": query}, + success=True, + message=model_response.response.content or "", + ) diff --git a/autogpts/autogpt/autogpt/core/ability/schema.py b/autogpts/autogpt/autogpt/core/ability/schema.py new file mode 100644 index 000000000000..3d20a7b929a4 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/ability/schema.py @@ -0,0 +1,30 @@ +import enum +from typing import Any + +from pydantic import BaseModel + + +class ContentType(str, enum.Enum): + # TBD what these actually are. + TEXT = "text" + CODE = "code" + + +class Knowledge(BaseModel): + content: str + content_type: ContentType + content_metadata: dict[str, Any] + + +class AbilityResult(BaseModel): + """The AbilityResult is a standard response struct for an ability.""" + + ability_name: str + ability_args: dict[str, str] + success: bool + message: str + new_knowledge: Knowledge = None + + def summary(self) -> str: + kwargs = ", ".join(f"{k}={v}" for k, v in self.ability_args.items()) + return f"{self.ability_name}({kwargs}): {self.message}" diff --git a/autogpts/autogpt/autogpt/core/ability/simple.py b/autogpts/autogpt/autogpt/core/ability/simple.py new file mode 100644 index 000000000000..962413182135 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/ability/simple.py @@ -0,0 +1,97 @@ +import logging + +from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry +from autogpt.core.ability.builtins import BUILTIN_ABILITIES +from autogpt.core.ability.schema import AbilityResult +from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings +from 
autogpt.core.memory.base import Memory +from autogpt.core.plugin.simple import SimplePluginService +from autogpt.core.resource.model_providers import ( + ChatModelProvider, + CompletionModelFunction, + ModelProviderName, +) +from autogpt.core.workspace.base import Workspace + + +class AbilityRegistryConfiguration(SystemConfiguration): + """Configuration for the AbilityRegistry subsystem.""" + + abilities: dict[str, AbilityConfiguration] + + +class AbilityRegistrySettings(SystemSettings): + configuration: AbilityRegistryConfiguration + + +class SimpleAbilityRegistry(AbilityRegistry, Configurable): + default_settings = AbilityRegistrySettings( + name="simple_ability_registry", + description="A simple ability registry.", + configuration=AbilityRegistryConfiguration( + abilities={ + ability_name: ability.default_configuration + for ability_name, ability in BUILTIN_ABILITIES.items() + }, + ), + ) + + def __init__( + self, + settings: AbilityRegistrySettings, + logger: logging.Logger, + memory: Memory, + workspace: Workspace, + model_providers: dict[ModelProviderName, ChatModelProvider], + ): + self._configuration = settings.configuration + self._logger = logger + self._memory = memory + self._workspace = workspace + self._model_providers = model_providers + self._abilities: list[Ability] = [] + for ( + ability_name, + ability_configuration, + ) in self._configuration.abilities.items(): + self.register_ability(ability_name, ability_configuration) + + def register_ability( + self, ability_name: str, ability_configuration: AbilityConfiguration + ) -> None: + ability_class = SimplePluginService.get_plugin(ability_configuration.location) + ability_args = { + "logger": self._logger.getChild(ability_name), + "configuration": ability_configuration, + } + if ability_configuration.packages_required: + # TODO: Check packages are installed and maybe install them. 
+ pass + if ability_configuration.memory_provider_required: + ability_args["memory"] = self._memory + if ability_configuration.workspace_required: + ability_args["workspace"] = self._workspace + if ability_configuration.language_model_required: + ability_args["language_model_provider"] = self._model_providers[ + ability_configuration.language_model_required.provider_name + ] + ability = ability_class(**ability_args) + self._abilities.append(ability) + + def list_abilities(self) -> list[str]: + return [ + f"{ability.name()}: {ability.description}" for ability in self._abilities + ] + + def dump_abilities(self) -> list[CompletionModelFunction]: + return [ability.spec for ability in self._abilities] + + def get_ability(self, ability_name: str) -> Ability: + for ability in self._abilities: + if ability.name() == ability_name: + return ability + raise ValueError(f"Ability '{ability_name}' not found.") + + async def perform(self, ability_name: str, **kwargs) -> AbilityResult: + ability = self.get_ability(ability_name) + return await ability(**kwargs) diff --git a/autogpts/autogpt/autogpt/core/agent/__init__.py b/autogpts/autogpt/autogpt/core/agent/__init__.py new file mode 100644 index 000000000000..f4e7a5a73398 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/agent/__init__.py @@ -0,0 +1,9 @@ +"""The Agent is an autonomouos entity guided by a LLM provider.""" +from autogpt.core.agent.base import Agent +from autogpt.core.agent.simple import AgentSettings, SimpleAgent + +__all__ = [ + "Agent", + "AgentSettings", + "SimpleAgent", +] diff --git a/autogpts/autogpt/autogpt/core/agent/base.py b/autogpts/autogpt/autogpt/core/agent/base.py new file mode 100644 index 000000000000..c574dcea28c0 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/agent/base.py @@ -0,0 +1,26 @@ +import abc +import logging +from pathlib import Path + + +class Agent(abc.ABC): + @abc.abstractmethod + def __init__(self, *args, **kwargs): + ... 
+ + @classmethod + @abc.abstractmethod + def from_workspace( + cls, + workspace_path: Path, + logger: logging.Logger, + ) -> "Agent": + ... + + @abc.abstractmethod + async def determine_next_ability(self, *args, **kwargs): + ... + + @abc.abstractmethod + def __repr__(self): + ... diff --git a/autogpts/autogpt/autogpt/core/agent/simple.py b/autogpts/autogpt/autogpt/core/agent/simple.py new file mode 100644 index 000000000000..ea113dafc201 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/agent/simple.py @@ -0,0 +1,404 @@ +import logging +from datetime import datetime +from pathlib import Path +from typing import Any + +from pydantic import BaseModel + +from autogpt.core.ability import ( + AbilityRegistrySettings, + AbilityResult, + SimpleAbilityRegistry, +) +from autogpt.core.agent.base import Agent +from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings +from autogpt.core.memory import MemorySettings, SimpleMemory +from autogpt.core.planning import PlannerSettings, SimplePlanner, Task, TaskStatus +from autogpt.core.plugin.simple import ( + PluginLocation, + PluginStorageFormat, + SimplePluginService, +) +from autogpt.core.resource.model_providers import ( + CompletionModelFunction, + OpenAIProvider, + OpenAISettings, +) +from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings + + +class AgentSystems(SystemConfiguration): + ability_registry: PluginLocation + memory: PluginLocation + openai_provider: PluginLocation + planning: PluginLocation + workspace: PluginLocation + + +class AgentConfiguration(SystemConfiguration): + cycle_count: int + max_task_cycle_count: int + creation_time: str + name: str + role: str + goals: list[str] + systems: AgentSystems + + +class AgentSystemSettings(SystemSettings): + configuration: AgentConfiguration + + +class AgentSettings(BaseModel): + agent: AgentSystemSettings + ability_registry: AbilityRegistrySettings + memory: MemorySettings + openai_provider: OpenAISettings + planning: 
PlannerSettings + workspace: WorkspaceSettings + + def update_agent_name_and_goals(self, agent_goals: dict) -> None: + self.agent.configuration.name = agent_goals["agent_name"] + self.agent.configuration.role = agent_goals["agent_role"] + self.agent.configuration.goals = agent_goals["agent_goals"] + + +class SimpleAgent(Agent, Configurable): + default_settings = AgentSystemSettings( + name="simple_agent", + description="A simple agent.", + configuration=AgentConfiguration( + name="Entrepreneur-GPT", + role=( + "An AI designed to autonomously develop and run businesses with " + "the sole goal of increasing your net worth." + ), + goals=[ + "Increase net worth", + "Grow Twitter Account", + "Develop and manage multiple businesses autonomously", + ], + cycle_count=0, + max_task_cycle_count=3, + creation_time="", + systems=AgentSystems( + ability_registry=PluginLocation( + storage_format=PluginStorageFormat.INSTALLED_PACKAGE, + storage_route="autogpt.core.ability.SimpleAbilityRegistry", + ), + memory=PluginLocation( + storage_format=PluginStorageFormat.INSTALLED_PACKAGE, + storage_route="autogpt.core.memory.SimpleMemory", + ), + openai_provider=PluginLocation( + storage_format=PluginStorageFormat.INSTALLED_PACKAGE, + storage_route=( + "autogpt.core.resource.model_providers.OpenAIProvider" + ), + ), + planning=PluginLocation( + storage_format=PluginStorageFormat.INSTALLED_PACKAGE, + storage_route="autogpt.core.planning.SimplePlanner", + ), + workspace=PluginLocation( + storage_format=PluginStorageFormat.INSTALLED_PACKAGE, + storage_route="autogpt.core.workspace.SimpleWorkspace", + ), + ), + ), + ) + + def __init__( + self, + settings: AgentSystemSettings, + logger: logging.Logger, + ability_registry: SimpleAbilityRegistry, + memory: SimpleMemory, + openai_provider: OpenAIProvider, + planning: SimplePlanner, + workspace: SimpleWorkspace, + ): + self._configuration = settings.configuration + self._logger = logger + self._ability_registry = ability_registry + self._memory = 
memory + # FIXME: Need some work to make this work as a dict of providers + # Getting the construction of the config to work is a bit tricky + self._openai_provider = openai_provider + self._planning = planning + self._workspace = workspace + self._task_queue = [] + self._completed_tasks = [] + self._current_task = None + self._next_ability = None + + @classmethod + def from_workspace( + cls, + workspace_path: Path, + logger: logging.Logger, + ) -> "SimpleAgent": + agent_settings = SimpleWorkspace.load_agent_settings(workspace_path) + agent_args = {} + + agent_args["settings"] = agent_settings.agent + agent_args["logger"] = logger + agent_args["workspace"] = cls._get_system_instance( + "workspace", + agent_settings, + logger, + ) + agent_args["openai_provider"] = cls._get_system_instance( + "openai_provider", + agent_settings, + logger, + ) + agent_args["planning"] = cls._get_system_instance( + "planning", + agent_settings, + logger, + model_providers={"openai": agent_args["openai_provider"]}, + ) + agent_args["memory"] = cls._get_system_instance( + "memory", + agent_settings, + logger, + workspace=agent_args["workspace"], + ) + + agent_args["ability_registry"] = cls._get_system_instance( + "ability_registry", + agent_settings, + logger, + workspace=agent_args["workspace"], + memory=agent_args["memory"], + model_providers={"openai": agent_args["openai_provider"]}, + ) + + return cls(**agent_args) + + async def build_initial_plan(self) -> dict: + plan = await self._planning.make_initial_plan( + agent_name=self._configuration.name, + agent_role=self._configuration.role, + agent_goals=self._configuration.goals, + abilities=self._ability_registry.list_abilities(), + ) + tasks = [Task.parse_obj(task) for task in plan.parsed_result["task_list"]] + + # TODO: Should probably do a step to evaluate the quality of the generated tasks + # and ensure that they have actionable ready and acceptance criteria + + self._task_queue.extend(tasks) + self._task_queue.sort(key=lambda t: 
t.priority, reverse=True) + self._task_queue[-1].context.status = TaskStatus.READY + return plan.parsed_result + + async def determine_next_ability(self, *args, **kwargs): + if not self._task_queue: + return {"response": "I don't have any tasks to work on right now."} + + self._configuration.cycle_count += 1 + task = self._task_queue.pop() + self._logger.info(f"Working on task: {task}") + + task = await self._evaluate_task_and_add_context(task) + next_ability = await self._choose_next_ability( + task, + self._ability_registry.dump_abilities(), + ) + self._current_task = task + self._next_ability = next_ability.parsed_result + return self._current_task, self._next_ability + + async def execute_next_ability(self, user_input: str, *args, **kwargs): + if user_input == "y": + ability = self._ability_registry.get_ability( + self._next_ability["next_ability"] + ) + ability_response = await ability(**self._next_ability["ability_arguments"]) + await self._update_tasks_and_memory(ability_response) + if self._current_task.context.status == TaskStatus.DONE: + self._completed_tasks.append(self._current_task) + else: + self._task_queue.append(self._current_task) + self._current_task = None + self._next_ability = None + + return ability_response.dict() + else: + raise NotImplementedError + + async def _evaluate_task_and_add_context(self, task: Task) -> Task: + """Evaluate the task and add context to it.""" + if task.context.status == TaskStatus.IN_PROGRESS: + # Nothing to do here + return task + else: + self._logger.debug(f"Evaluating task {task} and adding relevant context.") + # TODO: Look up relevant memories (need working memory system) + # TODO: Eval whether there is enough information to start the task (w/ LLM). 
+ task.context.enough_info = True + task.context.status = TaskStatus.IN_PROGRESS + return task + + async def _choose_next_ability( + self, + task: Task, + ability_specs: list[CompletionModelFunction], + ): + """Choose the next ability to use for the task.""" + self._logger.debug(f"Choosing next ability for task {task}.") + if task.context.cycle_count > self._configuration.max_task_cycle_count: + # Don't hit the LLM, just set the next action as "breakdown_task" + # with an appropriate reason + raise NotImplementedError + elif not task.context.enough_info: + # Don't ask the LLM, just set the next action as "breakdown_task" + # with an appropriate reason + raise NotImplementedError + else: + next_ability = await self._planning.determine_next_ability( + task, ability_specs + ) + return next_ability + + async def _update_tasks_and_memory(self, ability_result: AbilityResult): + self._current_task.context.cycle_count += 1 + self._current_task.context.prior_actions.append(ability_result) + # TODO: Summarize new knowledge + # TODO: store knowledge and summaries in memory and in relevant tasks + # TODO: evaluate whether the task is complete + + def __repr__(self): + return "SimpleAgent()" + + ################################################################ + # Factory interface for agent bootstrapping and initialization # + ################################################################ + + @classmethod + def build_user_configuration(cls) -> dict[str, Any]: + """Build the user's configuration.""" + configuration_dict = { + "agent": cls.get_user_config(), + } + + system_locations = configuration_dict["agent"]["configuration"]["systems"] + for system_name, system_location in system_locations.items(): + system_class = SimplePluginService.get_plugin(system_location) + configuration_dict[system_name] = system_class.get_user_config() + configuration_dict = _prune_empty_dicts(configuration_dict) + return configuration_dict + + @classmethod + def compile_settings( + cls, logger: 
logging.Logger, user_configuration: dict + ) -> AgentSettings: + """Compile the user's configuration with the defaults.""" + logger.debug("Processing agent system configuration.") + configuration_dict = { + "agent": cls.build_agent_configuration( + user_configuration.get("agent", {}) + ).dict(), + } + + system_locations = configuration_dict["agent"]["configuration"]["systems"] + + # Build up default configuration + for system_name, system_location in system_locations.items(): + logger.debug(f"Compiling configuration for system {system_name}") + system_class = SimplePluginService.get_plugin(system_location) + configuration_dict[system_name] = system_class.build_agent_configuration( + user_configuration.get(system_name, {}) + ).dict() + + return AgentSettings.parse_obj(configuration_dict) + + @classmethod + async def determine_agent_name_and_goals( + cls, + user_objective: str, + agent_settings: AgentSettings, + logger: logging.Logger, + ) -> dict: + logger.debug("Loading OpenAI provider.") + provider: OpenAIProvider = cls._get_system_instance( + "openai_provider", + agent_settings, + logger=logger, + ) + logger.debug("Loading agent planner.") + agent_planner: SimplePlanner = cls._get_system_instance( + "planning", + agent_settings, + logger=logger, + model_providers={"openai": provider}, + ) + logger.debug("determining agent name and goals.") + model_response = await agent_planner.decide_name_and_goals( + user_objective, + ) + + return model_response.parsed_result + + @classmethod + def provision_agent( + cls, + agent_settings: AgentSettings, + logger: logging.Logger, + ): + agent_settings.agent.configuration.creation_time = datetime.now().strftime( + "%Y%m%d_%H%M%S" + ) + workspace: SimpleWorkspace = cls._get_system_instance( + "workspace", + agent_settings, + logger=logger, + ) + return workspace.setup_workspace(agent_settings, logger) + + @classmethod + def _get_system_instance( + cls, + system_name: str, + agent_settings: AgentSettings, + logger: logging.Logger, 
+ *args, + **kwargs, + ): + system_locations = agent_settings.agent.configuration.systems.dict() + + system_settings = getattr(agent_settings, system_name) + system_class = SimplePluginService.get_plugin(system_locations[system_name]) + system_instance = system_class( + system_settings, + *args, + logger=logger.getChild(system_name), + **kwargs, + ) + return system_instance + + +def _prune_empty_dicts(d: dict) -> dict: + """ + Prune branches from a nested dictionary if the branch only contains empty + dictionaries at the leaves. + + Args: + d: The dictionary to prune. + + Returns: + The pruned dictionary. + """ + pruned = {} + for key, value in d.items(): + if isinstance(value, dict): + pruned_value = _prune_empty_dicts(value) + if ( + pruned_value + ): # if the pruned dictionary is not empty, add it to the result + pruned[key] = pruned_value + else: + pruned[key] = value + return pruned diff --git a/autogpts/autogpt/autogpt/core/configuration/__init__.py b/autogpts/autogpt/autogpt/core/configuration/__init__.py new file mode 100644 index 000000000000..231819299b7a --- /dev/null +++ b/autogpts/autogpt/autogpt/core/configuration/__init__.py @@ -0,0 +1,14 @@ +"""The configuration encapsulates settings for all Agent subsystems.""" +from autogpt.core.configuration.schema import ( + Configurable, + SystemConfiguration, + SystemSettings, + UserConfigurable, +) + +__all__ = [ + "Configurable", + "SystemConfiguration", + "SystemSettings", + "UserConfigurable", +] diff --git a/autogpts/autogpt/autogpt/core/configuration/schema.py b/autogpts/autogpt/autogpt/core/configuration/schema.py new file mode 100644 index 000000000000..5bc95ffac2bb --- /dev/null +++ b/autogpts/autogpt/autogpt/core/configuration/schema.py @@ -0,0 +1,351 @@ +import abc +import os +import typing +from typing import Any, Callable, Generic, Optional, Type, TypeVar, get_args + +from pydantic import BaseModel, Field, ValidationError +from pydantic.fields import ModelField, Undefined, UndefinedType +from 
pydantic.main import ModelMetaclass + +T = TypeVar("T") +M = TypeVar("M", bound=BaseModel) + + +def UserConfigurable( + default: T | UndefinedType = Undefined, + *args, + default_factory: Optional[Callable[[], T]] = None, + from_env: Optional[str | Callable[[], T | None]] = None, + description: str = "", + **kwargs, +) -> T: + # TODO: use this to auto-generate docs for the application configuration + return Field( + default, + *args, + default_factory=default_factory, + from_env=from_env, + description=description, + **kwargs, + user_configurable=True, + ) + + +class SystemConfiguration(BaseModel): + def get_user_config(self) -> dict[str, Any]: + return _recurse_user_config_values(self) + + @classmethod + def from_env(cls): + """ + Initializes the config object from environment variables. + + Environment variables are mapped to UserConfigurable fields using the from_env + attribute that can be passed to UserConfigurable. + """ + + def infer_field_value(field: ModelField): + field_info = field.field_info + default_value = ( + field.default + if field.default not in (None, Undefined) + else (field.default_factory() if field.default_factory else Undefined) + ) + if from_env := field_info.extra.get("from_env"): + val_from_env = ( + os.getenv(from_env) if type(from_env) is str else from_env() + ) + if val_from_env is not None: + return val_from_env + return default_value + + return _recursive_init_model(cls, infer_field_value) + + class Config: + extra = "forbid" + use_enum_values = True + validate_assignment = True + + +SC = TypeVar("SC", bound=SystemConfiguration) + + +class SystemSettings(BaseModel): + """A base class for all system settings.""" + + name: str + description: str + + class Config: + extra = "forbid" + use_enum_values = True + validate_assignment = True + + +S = TypeVar("S", bound=SystemSettings) + + +class Configurable(abc.ABC, Generic[S]): + """A base class for all configurable objects.""" + + prefix: str = "" + default_settings: typing.ClassVar[S] + 
def _update_user_config_from_env(instance: BaseModel) -> dict[str, Any]:
    """
    Update config fields of a Pydantic model instance from environment variables.

    Precedence:
    1. Non-default value already on the instance
    2. Value returned by `from_env()`
    3. Default value for the field

    Params:
        instance: The Pydantic model instance.

    Returns:
        The user config fields of the instance.
    """

    def infer_field_value(field: ModelField, value):
        # Only consult the environment when the field still holds its default;
        # an explicit non-default value on the instance wins.
        field_info = field.field_info
        default_value = (
            field.default
            if field.default not in (None, Undefined)
            else (field.default_factory() if field.default_factory else None)
        )
        if value == default_value and (from_env := field_info.extra.get("from_env")):
            val_from_env = os.getenv(from_env) if type(from_env) is str else from_env()
            if val_from_env is not None:
                return val_from_env
        return value

    def init_sub_config(model: Type[SC]) -> SC | None:
        # Build a sub-config from the environment; None when required fields
        # are absent, re-raise on any other validation problem.
        try:
            return model.from_env()
        except ValidationError as e:
            # Gracefully handle missing fields
            if all(e["type"] == "value_error.missing" for e in e.errors()):
                return None
            raise

    return _recurse_user_config_fields(instance, infer_field_value, init_sub_config)


def _recursive_init_model(
    model: Type[M],
    infer_field_value: Callable[[ModelField], Any],
) -> M:
    """
    Recursively initialize the user configuration fields of a Pydantic model.

    Parameters:
        model: The Pydantic model type.
        infer_field_value: A callback function to infer the value of each field.
            Parameters:
                ModelField: The Pydantic ModelField object describing the field.

    Returns:
        BaseModel: An instance of the model with the initialized configuration.
    """
    user_config_fields = {}
    for name, field in model.__fields__.items():
        if "user_configurable" in field.field_info.extra:
            user_config_fields[name] = infer_field_value(field)
        elif type(field.outer_type_) is ModelMetaclass and issubclass(
            field.outer_type_, SystemConfiguration
        ):
            try:
                user_config_fields[name] = _recursive_init_model(
                    model=field.outer_type_,
                    infer_field_value=infer_field_value,
                )
            except ValidationError as e:
                # Gracefully handle missing fields
                if all(e["type"] == "value_error.missing" for e in e.errors()):
                    user_config_fields[name] = None
                else:
                    # BUG FIX: the original re-raised unconditionally after the
                    # assignment above, making the graceful path dead code and
                    # aborting whenever any nested config had missing fields.
                    # Mirror the `return None` / `raise` pattern used by
                    # init_sub_config in _update_user_config_from_env.
                    raise

    user_config_fields = remove_none_items(user_config_fields)

    return model.parse_obj(user_config_fields)
+ """ + user_config_fields = {} + + for name, field in model.__fields__.items(): + value = getattr(model, name) + + # Handle individual field + if "user_configurable" in field.field_info.extra: + user_config_fields[name] = infer_field_value(field, value) + + # Recurse into nested config object + elif isinstance(value, SystemConfiguration): + user_config_fields[name] = _recurse_user_config_fields( + model=value, + infer_field_value=infer_field_value, + init_sub_config=init_sub_config, + ) + + # Recurse into optional nested config object + elif value is None and init_sub_config: + field_type = get_args(field.annotation)[0] # Optional[T] -> T + if type(field_type) is ModelMetaclass and issubclass( + field_type, SystemConfiguration + ): + sub_config = init_sub_config(field_type) + if sub_config: + user_config_fields[name] = _recurse_user_config_fields( + model=sub_config, + infer_field_value=infer_field_value, + init_sub_config=init_sub_config, + ) + + elif isinstance(value, list) and all( + isinstance(i, SystemConfiguration) for i in value + ): + user_config_fields[name] = [ + _recurse_user_config_fields(i, infer_field_value, init_sub_config) + for i in value + ] + elif isinstance(value, dict) and all( + isinstance(i, SystemConfiguration) for i in value.values() + ): + user_config_fields[name] = { + k: _recurse_user_config_fields(v, infer_field_value, init_sub_config) + for k, v in value.items() + } + + return user_config_fields + + +def _recurse_user_config_values( + instance: BaseModel, + get_field_value: Callable[[ModelField, T], T] = lambda _, v: v, +) -> dict[str, Any]: + """ + This function recursively traverses the user configuration values in a Pydantic + model instance. + + Params: + instance: A Pydantic model instance. + get_field_value: A callback function to process each field. Parameters: + ModelField: The Pydantic ModelField object that describes the field. + Any: The current value of the field. 
+ + Returns: + A dictionary containing the processed user configuration fields of the instance. + """ + user_config_values = {} + + for name, value in instance.__dict__.items(): + field = instance.__fields__[name] + if "user_configurable" in field.field_info.extra: + user_config_values[name] = get_field_value(field, value) + elif isinstance(value, SystemConfiguration): + user_config_values[name] = _recurse_user_config_values( + instance=value, get_field_value=get_field_value + ) + elif isinstance(value, list) and all( + isinstance(i, SystemConfiguration) for i in value + ): + user_config_values[name] = [ + _recurse_user_config_values(i, get_field_value) for i in value + ] + elif isinstance(value, dict) and all( + isinstance(i, SystemConfiguration) for i in value.values() + ): + user_config_values[name] = { + k: _recurse_user_config_values(v, get_field_value) + for k, v in value.items() + } + + return user_config_values + + +def _get_non_default_user_config_values(instance: BaseModel) -> dict[str, Any]: + """ + Get the non-default user config fields of a Pydantic model instance. + + Params: + instance: The Pydantic model instance. + + Returns: + dict[str, Any]: The non-default user config values on the instance. + """ + + def get_field_value(field: ModelField, value): + default = field.default_factory() if field.default_factory else field.default + if value != default: + return value + + return remove_none_items(_recurse_user_config_values(instance, get_field_value)) + + +def deep_update(original_dict: dict, update_dict: dict) -> dict: + """ + Recursively update a dictionary. + + Params: + original_dict (dict): The dictionary to be updated. + update_dict (dict): The dictionary to update with. + + Returns: + dict: The updated dictionary. 
+ """ + for key, value in update_dict.items(): + if ( + key in original_dict + and isinstance(original_dict[key], dict) + and isinstance(value, dict) + ): + original_dict[key] = deep_update(original_dict[key], value) + else: + original_dict[key] = value + return original_dict + + +def remove_none_items(d): + if isinstance(d, dict): + return { + k: remove_none_items(v) for k, v in d.items() if v not in (None, Undefined) + } + return d diff --git a/autogpts/autogpt/autogpt/core/memory/__init__.py b/autogpts/autogpt/autogpt/core/memory/__init__.py new file mode 100644 index 000000000000..f77b61039bef --- /dev/null +++ b/autogpts/autogpt/autogpt/core/memory/__init__.py @@ -0,0 +1,9 @@ +"""The memory subsystem manages the Agent's long-term memory.""" +from autogpt.core.memory.base import Memory +from autogpt.core.memory.simple import MemorySettings, SimpleMemory + +__all__ = [ + "Memory", + "MemorySettings", + "SimpleMemory", +] diff --git a/autogpts/autogpt/autogpt/core/memory/base.py b/autogpts/autogpt/autogpt/core/memory/base.py new file mode 100644 index 000000000000..74a4284061a5 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/memory/base.py @@ -0,0 +1,13 @@ +import abc + + +class Memory(abc.ABC): + pass + + +class MemoryItem(abc.ABC): + pass + + +class MessageHistory(abc.ABC): + pass diff --git a/autogpts/autogpt/autogpt/core/memory/simple.py b/autogpts/autogpt/autogpt/core/memory/simple.py new file mode 100644 index 000000000000..2433f48bc161 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/memory/simple.py @@ -0,0 +1,47 @@ +import json +import logging + +from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings +from autogpt.core.memory.base import Memory +from autogpt.core.workspace import Workspace + + +class MemoryConfiguration(SystemConfiguration): + pass + + +class MemorySettings(SystemSettings): + configuration: MemoryConfiguration + + +class MessageHistory: + def __init__(self, previous_message_history: list[str]): + 
self._message_history = previous_message_history + + +class SimpleMemory(Memory, Configurable): + default_settings = MemorySettings( + name="simple_memory", + description="A simple memory.", + configuration=MemoryConfiguration(), + ) + + def __init__( + self, + settings: MemorySettings, + logger: logging.Logger, + workspace: Workspace, + ): + self._configuration = settings.configuration + self._logger = logger + self._message_history = self._load_message_history(workspace) + + @staticmethod + def _load_message_history(workspace: Workspace): + message_history_path = workspace.get_path("message_history.json") + if message_history_path.exists(): + with message_history_path.open("r") as f: + message_history = json.load(f) + else: + message_history = [] + return MessageHistory(message_history) diff --git a/autogpts/autogpt/autogpt/core/planning/__init__.py b/autogpts/autogpt/autogpt/core/planning/__init__.py new file mode 100644 index 000000000000..99ab573f8634 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/planning/__init__.py @@ -0,0 +1,11 @@ +"""The planning system organizes the Agent's activities.""" +from autogpt.core.planning.schema import Task, TaskStatus, TaskType +from autogpt.core.planning.simple import PlannerSettings, SimplePlanner + +__all__ = [ + "PlannerSettings", + "SimplePlanner", + "Task", + "TaskStatus", + "TaskType", +] diff --git a/autogpts/autogpt/autogpt/core/planning/base.py b/autogpts/autogpt/autogpt/core/planning/base.py new file mode 100644 index 000000000000..7993c490be15 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/planning/base.py @@ -0,0 +1,54 @@ +# class Planner(abc.ABC): +# """ +# Manages the agent's planning and goal-setting +# by constructing language model prompts. +# """ +# +# @staticmethod +# @abc.abstractmethod +# async def decide_name_and_goals( +# user_objective: str, +# ) -> LanguageModelResponse: +# """Decide the name and goals of an Agent from a user-defined objective. 
+# +# Args: +# user_objective: The user-defined objective for the agent. +# +# Returns: +# The agent name and goals as a response from the language model. +# +# """ +# ... +# +# @abc.abstractmethod +# async def plan(self, context: PlanningContext) -> LanguageModelResponse: +# """Plan the next ability for the Agent. +# +# Args: +# context: A context object containing information about the agent's +# progress, result, memories, and feedback. +# +# +# Returns: +# The next ability the agent should take along with thoughts and reasoning. +# +# """ +# ... +# +# @abc.abstractmethod +# def reflect( +# self, +# context: ReflectionContext, +# ) -> LanguageModelResponse: +# """Reflect on a planned ability and provide self-criticism. +# +# +# Args: +# context: A context object containing information about the agent's +# reasoning, plan, thoughts, and criticism. +# +# Returns: +# Self-criticism about the agent's plan. +# +# """ +# ... diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/__init__.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/__init__.py new file mode 100644 index 000000000000..7b62279a7dde --- /dev/null +++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/__init__.py @@ -0,0 +1,12 @@ +from .initial_plan import InitialPlan, InitialPlanConfiguration +from .name_and_goals import NameAndGoals, NameAndGoalsConfiguration +from .next_ability import NextAbility, NextAbilityConfiguration + +__all__ = [ + "InitialPlan", + "InitialPlanConfiguration", + "NameAndGoals", + "NameAndGoalsConfiguration", + "NextAbility", + "NextAbilityConfiguration", +] diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py new file mode 100644 index 000000000000..ae137a985d65 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py @@ -0,0 +1,204 @@ +import logging + +from autogpt.core.configuration import 
SystemConfiguration, UserConfigurable +from autogpt.core.planning.schema import Task, TaskType +from autogpt.core.prompting import PromptStrategy +from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification +from autogpt.core.prompting.utils import to_numbered_list +from autogpt.core.resource.model_providers import ( + AssistantChatMessage, + ChatMessage, + CompletionModelFunction, +) +from autogpt.core.utils.json_schema import JSONSchema + +logger = logging.getLogger(__name__) + + +class InitialPlanConfiguration(SystemConfiguration): + model_classification: LanguageModelClassification = UserConfigurable() + system_prompt_template: str = UserConfigurable() + system_info: list[str] = UserConfigurable() + user_prompt_template: str = UserConfigurable() + create_plan_function: dict = UserConfigurable() + + +class InitialPlan(PromptStrategy): + DEFAULT_SYSTEM_PROMPT_TEMPLATE = ( + "You are an expert project planner. " + "Your responsibility is to create work plans for autonomous agents. " + "You will be given a name, a role, set of goals for the agent to accomplish. " + "Your job is to break down those goals into a set of tasks that the agent can" + " accomplish to achieve those goals. " + "Agents are resourceful, but require clear instructions." + " Each task you create should have clearly defined `ready_criteria` that the" + " agent can check to see if the task is ready to be started." + " Each task should also have clearly defined `acceptance_criteria` that the" + " agent can check to evaluate if the task is complete. " + "You should create as many tasks as you think is necessary to accomplish" + " the goals.\n\n" + "System Info:\n{system_info}" + ) + + DEFAULT_SYSTEM_INFO = [ + "The OS you are running on is: {os_info}", + "It takes money to let you run. 
Your API budget is ${api_budget:.3f}", + "The current time and date is {current_time}", + ] + + DEFAULT_USER_PROMPT_TEMPLATE = ( + "You are {agent_name}, {agent_role}\n" "Your goals are:\n" "{agent_goals}" + ) + + DEFAULT_CREATE_PLAN_FUNCTION = CompletionModelFunction( + name="create_initial_agent_plan", + description=( + "Creates a set of tasks that forms the initial plan of an autonomous agent." + ), + parameters={ + "task_list": JSONSchema( + type=JSONSchema.Type.ARRAY, + items=JSONSchema( + type=JSONSchema.Type.OBJECT, + properties={ + "objective": JSONSchema( + type=JSONSchema.Type.STRING, + description=( + "An imperative verb phrase that succinctly describes " + "the task." + ), + ), + "type": JSONSchema( + type=JSONSchema.Type.STRING, + description="A categorization for the task.", + enum=[t.value for t in TaskType], + ), + "acceptance_criteria": JSONSchema( + type=JSONSchema.Type.ARRAY, + items=JSONSchema( + type=JSONSchema.Type.STRING, + description=( + "A list of measurable and testable criteria that " + "must be met for the task to be considered " + "complete." + ), + ), + ), + "priority": JSONSchema( + type=JSONSchema.Type.INTEGER, + description=( + "A number between 1 and 10 indicating the priority of " + "the task relative to other generated tasks." + ), + minimum=1, + maximum=10, + ), + "ready_criteria": JSONSchema( + type=JSONSchema.Type.ARRAY, + items=JSONSchema( + type=JSONSchema.Type.STRING, + description=( + "A list of measurable and testable criteria that " + "must be met before the task can be started." 
+ ), + ), + ), + }, + ), + ), + }, + ) + + default_configuration: InitialPlanConfiguration = InitialPlanConfiguration( + model_classification=LanguageModelClassification.SMART_MODEL, + system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE, + system_info=DEFAULT_SYSTEM_INFO, + user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE, + create_plan_function=DEFAULT_CREATE_PLAN_FUNCTION.schema, + ) + + def __init__( + self, + model_classification: LanguageModelClassification, + system_prompt_template: str, + system_info: list[str], + user_prompt_template: str, + create_plan_function: dict, + ): + self._model_classification = model_classification + self._system_prompt_template = system_prompt_template + self._system_info = system_info + self._user_prompt_template = user_prompt_template + self._create_plan_function = CompletionModelFunction.parse(create_plan_function) + + @property + def model_classification(self) -> LanguageModelClassification: + return self._model_classification + + def build_prompt( + self, + agent_name: str, + agent_role: str, + agent_goals: list[str], + abilities: list[str], + os_info: str, + api_budget: float, + current_time: str, + **kwargs, + ) -> ChatPrompt: + template_kwargs = { + "agent_name": agent_name, + "agent_role": agent_role, + "os_info": os_info, + "api_budget": api_budget, + "current_time": current_time, + **kwargs, + } + template_kwargs["agent_goals"] = to_numbered_list( + agent_goals, **template_kwargs + ) + template_kwargs["abilities"] = to_numbered_list(abilities, **template_kwargs) + template_kwargs["system_info"] = to_numbered_list( + self._system_info, **template_kwargs + ) + + system_prompt = ChatMessage.system( + self._system_prompt_template.format(**template_kwargs), + ) + user_prompt = ChatMessage.user( + self._user_prompt_template.format(**template_kwargs), + ) + + return ChatPrompt( + messages=[system_prompt, user_prompt], + functions=[self._create_plan_function], + # TODO: + tokens_used=0, + ) + + def parse_response_content( + 
self, + response_content: AssistantChatMessage, + ) -> dict: + """Parse the actual text response from the objective model. + + Args: + response_content: The raw response content from the objective model. + + Returns: + The parsed response. + """ + try: + if not response_content.tool_calls: + raise ValueError( + f"LLM did not call {self._create_plan_function.name} function; " + "plan creation failed" + ) + parsed_response: object = response_content.tool_calls[0].function.arguments + parsed_response["task_list"] = [ + Task.parse_obj(task) for task in parsed_response["task_list"] + ] + except KeyError: + logger.debug(f"Failed to parse this response content: {response_content}") + raise + return parsed_response diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py new file mode 100644 index 000000000000..133b4590d37b --- /dev/null +++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py @@ -0,0 +1,147 @@ +import logging + +from autogpt.core.configuration import SystemConfiguration, UserConfigurable +from autogpt.core.prompting import PromptStrategy +from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification +from autogpt.core.resource.model_providers import ( + AssistantChatMessage, + ChatMessage, + CompletionModelFunction, +) +from autogpt.core.utils.json_schema import JSONSchema + +logger = logging.getLogger(__name__) + + +class NameAndGoalsConfiguration(SystemConfiguration): + model_classification: LanguageModelClassification = UserConfigurable() + system_prompt: str = UserConfigurable() + user_prompt_template: str = UserConfigurable() + create_agent_function: dict = UserConfigurable() + + +class NameAndGoals(PromptStrategy): + DEFAULT_SYSTEM_PROMPT = ( + "Your job is to respond to a user-defined task, given in triple quotes, by " + "invoking the `create_agent` function to generate an autonomous agent to " + 
"complete the task. " + "You should supply a role-based name for the agent, " + "an informative description for what the agent does, and " + "1 to 5 goals that are optimally aligned with the successful completion of " + "its assigned task.\n" + "\n" + "Example Input:\n" + '"""Help me with marketing my business"""\n\n' + "Example Function Call:\n" + "create_agent(name='CMOGPT', " + "description='A professional digital marketer AI that assists Solopreneurs in " + "growing their businesses by providing world-class expertise in solving " + "marketing problems for SaaS, content products, agencies, and more.', " + "goals=['Engage in effective problem-solving, prioritization, planning, and " + "supporting execution to address your marketing needs as your virtual Chief " + "Marketing Officer.', 'Provide specific, actionable, and concise advice to " + "help you make informed decisions without the use of platitudes or overly " + "wordy explanations.', 'Identify and prioritize quick wins and cost-effective " + "campaigns that maximize results with minimal time and budget investment.', " + "'Proactively take the lead in guiding you and offering suggestions when faced " + "with unclear information or uncertainty to ensure your marketing strategy " + "remains on track.'])" + ) + + DEFAULT_USER_PROMPT_TEMPLATE = '"""{user_objective}"""' + + DEFAULT_CREATE_AGENT_FUNCTION = CompletionModelFunction( + name="create_agent", + description="Create a new autonomous AI agent to complete a given task.", + parameters={ + "agent_name": JSONSchema( + type=JSONSchema.Type.STRING, + description="A short role-based name for an autonomous agent.", + ), + "agent_role": JSONSchema( + type=JSONSchema.Type.STRING, + description=( + "An informative one sentence description of what the AI agent does" + ), + ), + "agent_goals": JSONSchema( + type=JSONSchema.Type.ARRAY, + minItems=1, + maxItems=5, + items=JSONSchema( + type=JSONSchema.Type.STRING, + ), + description=( + "One to five highly effective 
goals that are optimally aligned " + "with the completion of a specific task. " + "The number and complexity of the goals should correspond to the " + "complexity of the agent's primary objective." + ), + ), + }, + ) + + default_configuration: NameAndGoalsConfiguration = NameAndGoalsConfiguration( + model_classification=LanguageModelClassification.SMART_MODEL, + system_prompt=DEFAULT_SYSTEM_PROMPT, + user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE, + create_agent_function=DEFAULT_CREATE_AGENT_FUNCTION.schema, + ) + + def __init__( + self, + model_classification: LanguageModelClassification, + system_prompt: str, + user_prompt_template: str, + create_agent_function: dict, + ): + self._model_classification = model_classification + self._system_prompt_message = system_prompt + self._user_prompt_template = user_prompt_template + self._create_agent_function = CompletionModelFunction.parse( + create_agent_function + ) + + @property + def model_classification(self) -> LanguageModelClassification: + return self._model_classification + + def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt: + system_message = ChatMessage.system(self._system_prompt_message) + user_message = ChatMessage.user( + self._user_prompt_template.format( + user_objective=user_objective, + ) + ) + prompt = ChatPrompt( + messages=[system_message, user_message], + functions=[self._create_agent_function], + # TODO + tokens_used=0, + ) + return prompt + + def parse_response_content( + self, + response_content: AssistantChatMessage, + ) -> dict: + """Parse the actual text response from the objective model. + + Args: + response_content: The raw response content from the objective model. + + Returns: + The parsed response. 
+ + """ + try: + if not response_content.tool_calls: + raise ValueError( + f"LLM did not call {self._create_agent_function} function; " + "agent profile creation failed" + ) + parsed_response = response_content.tool_calls[0].function.arguments + except KeyError: + logger.debug(f"Failed to parse this response content: {response_content}") + raise + return parsed_response diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py new file mode 100644 index 000000000000..0d6daad2e207 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py @@ -0,0 +1,201 @@ +import logging + +from autogpt.core.configuration import SystemConfiguration, UserConfigurable +from autogpt.core.planning.schema import Task +from autogpt.core.prompting import PromptStrategy +from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification +from autogpt.core.prompting.utils import to_numbered_list +from autogpt.core.resource.model_providers import ( + AssistantChatMessage, + ChatMessage, + CompletionModelFunction, +) +from autogpt.core.utils.json_schema import JSONSchema + +logger = logging.getLogger(__name__) + + +class NextAbilityConfiguration(SystemConfiguration): + model_classification: LanguageModelClassification = UserConfigurable() + system_prompt_template: str = UserConfigurable() + system_info: list[str] = UserConfigurable() + user_prompt_template: str = UserConfigurable() + additional_ability_arguments: dict = UserConfigurable() + + +class NextAbility(PromptStrategy): + DEFAULT_SYSTEM_PROMPT_TEMPLATE = "System Info:\n{system_info}" + + DEFAULT_SYSTEM_INFO = [ + "The OS you are running on is: {os_info}", + "It takes money to let you run. 
Your API budget is ${api_budget:.3f}", + "The current time and date is {current_time}", + ] + + DEFAULT_USER_PROMPT_TEMPLATE = ( + "Your current task is is {task_objective}.\n" + "You have taken {cycle_count} actions on this task already. " + "Here is the actions you have taken and their results:\n" + "{action_history}\n\n" + "Here is additional information that may be useful to you:\n" + "{additional_info}\n\n" + "Additionally, you should consider the following:\n" + "{user_input}\n\n" + "Your task of {task_objective} is complete when the following acceptance" + " criteria have been met:\n" + "{acceptance_criteria}\n\n" + "Please choose one of the provided functions to accomplish this task. " + "Some tasks may require multiple functions to accomplish. If that is the case," + " choose the function that you think is most appropriate for the current" + " situation given your progress so far." + ) + + DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS = { + "motivation": JSONSchema( + type=JSONSchema.Type.STRING, + description=( + "Your justification for choosing choosing this function instead of a " + "different one." + ), + ), + "self_criticism": JSONSchema( + type=JSONSchema.Type.STRING, + description=( + "Thoughtful self-criticism that explains why this function may not be " + "the best choice." + ), + ), + "reasoning": JSONSchema( + type=JSONSchema.Type.STRING, + description=( + "Your reasoning for choosing this function taking into account the " + "`motivation` and weighing the `self_criticism`." 
+ ), + ), + } + + default_configuration: NextAbilityConfiguration = NextAbilityConfiguration( + model_classification=LanguageModelClassification.SMART_MODEL, + system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE, + system_info=DEFAULT_SYSTEM_INFO, + user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE, + additional_ability_arguments={ + k: v.to_dict() for k, v in DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS.items() + }, + ) + + def __init__( + self, + model_classification: LanguageModelClassification, + system_prompt_template: str, + system_info: list[str], + user_prompt_template: str, + additional_ability_arguments: dict, + ): + self._model_classification = model_classification + self._system_prompt_template = system_prompt_template + self._system_info = system_info + self._user_prompt_template = user_prompt_template + self._additional_ability_arguments = JSONSchema.parse_properties( + additional_ability_arguments + ) + for p in self._additional_ability_arguments.values(): + p.required = True + + @property + def model_classification(self) -> LanguageModelClassification: + return self._model_classification + + def build_prompt( + self, + task: Task, + ability_specs: list[CompletionModelFunction], + os_info: str, + api_budget: float, + current_time: str, + **kwargs, + ) -> ChatPrompt: + template_kwargs = { + "os_info": os_info, + "api_budget": api_budget, + "current_time": current_time, + **kwargs, + } + + for ability in ability_specs: + ability.parameters.update(self._additional_ability_arguments) + + template_kwargs["task_objective"] = task.objective + template_kwargs["cycle_count"] = task.context.cycle_count + template_kwargs["action_history"] = to_numbered_list( + [action.summary() for action in task.context.prior_actions], + no_items_response="You have not taken any actions yet.", + **template_kwargs, + ) + template_kwargs["additional_info"] = to_numbered_list( + [memory.summary() for memory in task.context.memories] + + [info for info in 
task.context.supplementary_info], + no_items_response=( + "There is no additional information available at this time." + ), + **template_kwargs, + ) + template_kwargs["user_input"] = to_numbered_list( + [user_input for user_input in task.context.user_input], + no_items_response="There are no additional considerations at this time.", + **template_kwargs, + ) + template_kwargs["acceptance_criteria"] = to_numbered_list( + [acceptance_criteria for acceptance_criteria in task.acceptance_criteria], + **template_kwargs, + ) + + template_kwargs["system_info"] = to_numbered_list( + self._system_info, + **template_kwargs, + ) + + system_prompt = ChatMessage.system( + self._system_prompt_template.format(**template_kwargs) + ) + user_prompt = ChatMessage.user( + self._user_prompt_template.format(**template_kwargs) + ) + + return ChatPrompt( + messages=[system_prompt, user_prompt], + functions=ability_specs, + # TODO: + tokens_used=0, + ) + + def parse_response_content( + self, + response_content: AssistantChatMessage, + ) -> dict: + """Parse the actual text response from the objective model. + + Args: + response_content: The raw response content from the objective model. + + Returns: + The parsed response. 
+ + """ + try: + if not response_content.tool_calls: + raise ValueError("LLM did not call any function") + + function_name = response_content.tool_calls[0].function.name + function_arguments = response_content.tool_calls[0].function.arguments + parsed_response = { + "motivation": function_arguments.pop("motivation"), + "self_criticism": function_arguments.pop("self_criticism"), + "reasoning": function_arguments.pop("reasoning"), + "next_ability": function_name, + "ability_arguments": function_arguments, + } + except KeyError: + logger.debug(f"Failed to parse this response content: {response_content}") + raise + return parsed_response diff --git a/autogpts/autogpt/autogpt/core/planning/schema.py b/autogpts/autogpt/autogpt/core/planning/schema.py new file mode 100644 index 000000000000..b9ba818275d3 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/planning/schema.py @@ -0,0 +1,48 @@ +import enum +from typing import Optional + +from pydantic import BaseModel, Field + +from autogpt.core.ability.schema import AbilityResult + + +class TaskType(str, enum.Enum): + RESEARCH = "research" + WRITE = "write" + EDIT = "edit" + CODE = "code" + DESIGN = "design" + TEST = "test" + PLAN = "plan" + + +class TaskStatus(str, enum.Enum): + BACKLOG = "backlog" + READY = "ready" + IN_PROGRESS = "in_progress" + DONE = "done" + + +class TaskContext(BaseModel): + cycle_count: int = 0 + status: TaskStatus = TaskStatus.BACKLOG + parent: Optional["Task"] = None + prior_actions: list[AbilityResult] = Field(default_factory=list) + memories: list = Field(default_factory=list) + user_input: list[str] = Field(default_factory=list) + supplementary_info: list[str] = Field(default_factory=list) + enough_info: bool = False + + +class Task(BaseModel): + objective: str + type: str # TaskType FIXME: gpt does not obey the enum parameter in its schema + priority: int + ready_criteria: list[str] + acceptance_criteria: list[str] + context: TaskContext = Field(default_factory=TaskContext) + + +# Need to 
resolve the circular dependency between Task and TaskContext +# once both models are defined. +TaskContext.update_forward_refs() diff --git a/autogpts/autogpt/autogpt/core/planning/simple.py b/autogpts/autogpt/autogpt/core/planning/simple.py new file mode 100644 index 000000000000..356e6712e385 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/planning/simple.py @@ -0,0 +1,188 @@ +import logging +import platform +import time + +import distro + +from autogpt.core.configuration import ( + Configurable, + SystemConfiguration, + SystemSettings, + UserConfigurable, +) +from autogpt.core.planning import prompt_strategies +from autogpt.core.planning.schema import Task +from autogpt.core.prompting import PromptStrategy +from autogpt.core.prompting.schema import LanguageModelClassification +from autogpt.core.resource.model_providers import ( + ChatModelProvider, + ChatModelResponse, + CompletionModelFunction, + ModelProviderName, + OpenAIModelName, +) +from autogpt.core.runner.client_lib.logging.helpers import dump_prompt +from autogpt.core.workspace import Workspace + + +class LanguageModelConfiguration(SystemConfiguration): + """Struct for model configuration.""" + + model_name: str = UserConfigurable() + provider_name: ModelProviderName = UserConfigurable() + temperature: float = UserConfigurable() + + +class PromptStrategiesConfiguration(SystemConfiguration): + name_and_goals: prompt_strategies.NameAndGoalsConfiguration + initial_plan: prompt_strategies.InitialPlanConfiguration + next_ability: prompt_strategies.NextAbilityConfiguration + + +class PlannerConfiguration(SystemConfiguration): + """Configuration for the Planner subsystem.""" + + models: dict[LanguageModelClassification, LanguageModelConfiguration] + prompt_strategies: PromptStrategiesConfiguration + + +class PlannerSettings(SystemSettings): + """Settings for the Planner subsystem.""" + + configuration: PlannerConfiguration + + +class SimplePlanner(Configurable): + """ + Manages the agent's planning and 
goal-setting + by constructing language model prompts. + """ + + default_settings = PlannerSettings( + name="planner", + description=( + "Manages the agent's planning and goal-setting " + "by constructing language model prompts." + ), + configuration=PlannerConfiguration( + models={ + LanguageModelClassification.FAST_MODEL: LanguageModelConfiguration( + model_name=OpenAIModelName.GPT3, + provider_name=ModelProviderName.OPENAI, + temperature=0.9, + ), + LanguageModelClassification.SMART_MODEL: LanguageModelConfiguration( + model_name=OpenAIModelName.GPT4, + provider_name=ModelProviderName.OPENAI, + temperature=0.9, + ), + }, + prompt_strategies=PromptStrategiesConfiguration( + name_and_goals=prompt_strategies.NameAndGoals.default_configuration, + initial_plan=prompt_strategies.InitialPlan.default_configuration, + next_ability=prompt_strategies.NextAbility.default_configuration, + ), + ), + ) + + def __init__( + self, + settings: PlannerSettings, + logger: logging.Logger, + model_providers: dict[ModelProviderName, ChatModelProvider], + workspace: Workspace = None, # Workspace is not available during bootstrapping. 
+ ) -> None: + self._configuration = settings.configuration + self._logger = logger + self._workspace = workspace + + self._providers: dict[LanguageModelClassification, ChatModelProvider] = {} + for model, model_config in self._configuration.models.items(): + self._providers[model] = model_providers[model_config.provider_name] + + self._prompt_strategies = { + "name_and_goals": prompt_strategies.NameAndGoals( + **self._configuration.prompt_strategies.name_and_goals.dict() + ), + "initial_plan": prompt_strategies.InitialPlan( + **self._configuration.prompt_strategies.initial_plan.dict() + ), + "next_ability": prompt_strategies.NextAbility( + **self._configuration.prompt_strategies.next_ability.dict() + ), + } + + async def decide_name_and_goals(self, user_objective: str) -> ChatModelResponse: + return await self.chat_with_model( + self._prompt_strategies["name_and_goals"], + user_objective=user_objective, + ) + + async def make_initial_plan( + self, + agent_name: str, + agent_role: str, + agent_goals: list[str], + abilities: list[str], + ) -> ChatModelResponse: + return await self.chat_with_model( + self._prompt_strategies["initial_plan"], + agent_name=agent_name, + agent_role=agent_role, + agent_goals=agent_goals, + abilities=abilities, + ) + + async def determine_next_ability( + self, + task: Task, + ability_specs: list[CompletionModelFunction], + ): + return await self.chat_with_model( + self._prompt_strategies["next_ability"], + task=task, + ability_specs=ability_specs, + ) + + async def chat_with_model( + self, + prompt_strategy: PromptStrategy, + **kwargs, + ) -> ChatModelResponse: + model_classification = prompt_strategy.model_classification + model_configuration = self._configuration.models[model_classification].dict() + self._logger.debug(f"Using model configuration: {model_configuration}") + del model_configuration["provider_name"] + provider = self._providers[model_classification] + + template_kwargs = 
self._make_template_kwargs_for_strategy(prompt_strategy) + template_kwargs.update(kwargs) + prompt = prompt_strategy.build_prompt(**template_kwargs) + + self._logger.debug(f"Using prompt:\n{dump_prompt(prompt)}\n") + response = await provider.create_chat_completion( + model_prompt=prompt.messages, + functions=prompt.functions, + **model_configuration, + completion_parser=prompt_strategy.parse_response_content, + ) + return response + + def _make_template_kwargs_for_strategy(self, strategy: PromptStrategy): + provider = self._providers[strategy.model_classification] + template_kwargs = { + "os_info": get_os_info(), + "api_budget": provider.get_remaining_budget(), + "current_time": time.strftime("%c"), + } + return template_kwargs + + +def get_os_info() -> str: + os_name = platform.system() + os_info = ( + platform.platform(terse=True) + if os_name != "Linux" + else distro.name(pretty=True) + ) + return os_info diff --git a/autogpts/autogpt/autogpt/core/planning/templates.py b/autogpts/autogpt/autogpt/core/planning/templates.py new file mode 100644 index 000000000000..6464c8b8ade6 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/planning/templates.py @@ -0,0 +1,84 @@ +# Rules of thumb: +# - Templates don't add new lines at the end of the string. This is the +# responsibility of the or a consuming template. + +#################### +# Planner defaults # +#################### + + +USER_OBJECTIVE = ( + "Write a wikipedia style article about the project: " + "https://github.com/significant-gravitas/AutoGPT" +) + + +# Plan Prompt +# ----------- + + +PLAN_PROMPT_CONSTRAINTS = ( + "~4000 word limit for short term memory. Your short term memory is short, so " + "immediately save important information to files.", + "If you are unsure how you previously did something or want to recall past " + "events, thinking about similar events will help you remember.", + "No user assistance", + "Exclusively use the commands listed below e.g. 
command_name", +) + +PLAN_PROMPT_RESOURCES = ( + "Internet access for searches and information gathering.", + "Long-term memory management.", + "File output.", +) + +PLAN_PROMPT_PERFORMANCE_EVALUATIONS = ( + "Continuously review and analyze your actions to ensure you are performing to" + " the best of your abilities.", + "Constructively self-criticize your big-picture behavior constantly.", + "Reflect on past decisions and strategies to refine your approach.", + "Every command has a cost, so be smart and efficient. Aim to complete tasks in" + " the least number of steps.", + "Write all code to a file", +) + + +PLAN_PROMPT_RESPONSE_DICT = { + "thoughts": { + "text": "thought", + "reasoning": "reasoning", + "plan": "- short bulleted\n- list that conveys\n- long-term plan", + "criticism": "constructive self-criticism", + "speak": "thoughts summary to say to user", + }, + "command": {"name": "command name", "args": {"arg name": "value"}}, +} + +PLAN_PROMPT_RESPONSE_FORMAT = ( + "You should only respond in JSON format as described below\n" + "Response Format:\n" + "{response_json_structure}\n" + "Ensure the response can be parsed by Python json.loads" +) + +PLAN_TRIGGERING_PROMPT = ( + "Determine which next command to use, and respond using the format specified above:" +) + +PLAN_PROMPT_MAIN = ( + "{header}\n\n" + "GOALS:\n\n{goals}\n\n" + "Info:\n{info}\n\n" + "Constraints:\n{constraints}\n\n" + "Commands:\n{commands}\n\n" + "Resources:\n{resources}\n\n" + "Performance Evaluations:\n{performance_evaluations}\n\n" + "You should only respond in JSON format as described below\n" + "Response Format:\n{response_json_structure}\n" + "Ensure the response can be parsed by Python json.loads" +) + + +########################### +# Parameterized templates # +########################### diff --git a/autogpts/autogpt/autogpt/core/plugin/__init__.py b/autogpts/autogpt/autogpt/core/plugin/__init__.py new file mode 100644 index 000000000000..b850114b39c2 --- /dev/null +++ 
b/autogpts/autogpt/autogpt/core/plugin/__init__.py @@ -0,0 +1,6 @@ +"""The plugin system allows the Agent to be extended with new functionality.""" +from autogpt.core.plugin.base import PluginService + +__all__ = [ + "PluginService", +] diff --git a/autogpts/autogpt/autogpt/core/plugin/base.py b/autogpts/autogpt/autogpt/core/plugin/base.py new file mode 100644 index 000000000000..4066a18c0316 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/plugin/base.py @@ -0,0 +1,162 @@ +import abc +import enum +from typing import TYPE_CHECKING, Type + +from pydantic import BaseModel + +from autogpt.core.configuration import SystemConfiguration, UserConfigurable + +if TYPE_CHECKING: + from autogpt.core.ability import Ability, AbilityRegistry + from autogpt.core.memory import Memory + from autogpt.core.resource.model_providers import ( + ChatModelProvider, + EmbeddingModelProvider, + ) + + # Expand to other types as needed + PluginType = ( + Type[Ability] # Swappable now + | Type[AbilityRegistry] # Swappable maybe never + | Type[ChatModelProvider] # Swappable soon + | Type[EmbeddingModelProvider] # Swappable soon + | Type[Memory] # Swappable now + # | Type[Planner] # Swappable soon + ) + + +class PluginStorageFormat(str, enum.Enum): + """Supported plugin storage formats. + + Plugins can be stored at one of these supported locations. + + """ + + INSTALLED_PACKAGE = "installed_package" # Required now, loads system defaults + WORKSPACE = "workspace" # Required now + + # Soon (requires some tooling we don't have yet). + # OPENAPI_URL = "open_api_url" + + # OTHER_FILE_PATH = "other_file_path" # Maybe later (maybe now) + # GIT = "git" # Maybe later (or soon) + # PYPI = "pypi" # Maybe later + + # Long term solution, requires design + # AUTOGPT_PLUGIN_SERVICE = "autogpt_plugin_service" + + # Feature for later maybe, automatically find plugin. 
+ # AUTO = "auto" + + +# Installed package example +# PluginLocation( +# storage_format='installed_package', +# storage_route='autogpt_plugins.twitter.SendTwitterMessage' +# ) +# Workspace example +# PluginLocation( +# storage_format='workspace', +# storage_route='relative/path/to/plugin.pkl' +# OR +# storage_route='relative/path/to/plugin.py' +# ) +# Git +# PluginLocation( +# storage_format='git', +# Exact format TBD. +# storage_route='https://github.com/gravelBridge/AutoGPT-WolframAlpha/blob/main/autogpt-wolframalpha/wolfram_alpha.py' +# ) +# PyPI +# PluginLocation( +# storage_format='pypi', +# storage_route='package_name' +# ) + + +# PluginLocation( +# storage_format='installed_package', +# storage_route='autogpt_plugins.twitter.SendTwitterMessage' +# ) + + +# A plugin storage route. +# +# This is a string that specifies where to load a plugin from +# (e.g. an import path or file path). +PluginStorageRoute = str + + +class PluginLocation(SystemConfiguration): + """A plugin location. + + This is a combination of a plugin storage format and a plugin storage route. + It is used by the PluginService to load plugins. + + """ + + storage_format: PluginStorageFormat = UserConfigurable() + storage_route: PluginStorageRoute = UserConfigurable() + + +class PluginMetadata(BaseModel): + """Metadata about a plugin.""" + + name: str + description: str + location: PluginLocation + + +class PluginService(abc.ABC): + """Base class for plugin service. + + The plugin service should be stateless. This defines the interface for + loading plugins from various storage formats. + + """ + + @staticmethod + @abc.abstractmethod + def get_plugin(plugin_location: PluginLocation) -> "PluginType": + """Get a plugin from a plugin location.""" + ... 
+ + #################################### + # Low-level storage format loaders # + #################################### + @staticmethod + @abc.abstractmethod + def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from a file path.""" + + ... + + @staticmethod + @abc.abstractmethod + def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from an import path.""" + ... + + @staticmethod + @abc.abstractmethod + def resolve_name_to_path( + plugin_route: PluginStorageRoute, path_type: str + ) -> PluginStorageRoute: + """Resolve a plugin name to a plugin path.""" + ... + + ##################################### + # High-level storage format loaders # + ##################################### + + @staticmethod + @abc.abstractmethod + def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from the workspace.""" + ... + + @staticmethod + @abc.abstractmethod + def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from an installed package.""" + ... 
diff --git a/autogpts/autogpt/autogpt/core/plugin/simple.py b/autogpts/autogpt/autogpt/core/plugin/simple.py new file mode 100644 index 000000000000..7f0e60608d92 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/plugin/simple.py @@ -0,0 +1,75 @@ +from importlib import import_module +from typing import TYPE_CHECKING + +from autogpt.core.plugin.base import ( + PluginLocation, + PluginService, + PluginStorageFormat, + PluginStorageRoute, +) + +if TYPE_CHECKING: + from autogpt.core.plugin.base import PluginType + + +class SimplePluginService(PluginService): + @staticmethod + def get_plugin(plugin_location: dict | PluginLocation) -> "PluginType": + """Get a plugin from a plugin location.""" + if isinstance(plugin_location, dict): + plugin_location = PluginLocation.parse_obj(plugin_location) + if plugin_location.storage_format == PluginStorageFormat.WORKSPACE: + return SimplePluginService.load_from_workspace( + plugin_location.storage_route + ) + elif plugin_location.storage_format == PluginStorageFormat.INSTALLED_PACKAGE: + return SimplePluginService.load_from_installed_package( + plugin_location.storage_route + ) + else: + raise NotImplementedError( + "Plugin storage format %s is not implemented." + % plugin_location.storage_format + ) + + #################################### + # Low-level storage format loaders # + #################################### + @staticmethod + def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from a file path.""" + # TODO: Define an on disk storage format and implement this. 
+ # Can pull from existing zip file loading implementation + raise NotImplementedError("Loading from file path is not implemented.") + + @staticmethod + def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from an import path.""" + module_path, _, class_name = plugin_route.rpartition(".") + return getattr(import_module(module_path), class_name) + + @staticmethod + def resolve_name_to_path( + plugin_route: PluginStorageRoute, path_type: str + ) -> PluginStorageRoute: + """Resolve a plugin name to a plugin path.""" + # TODO: Implement a discovery system for finding plugins by name from known + # storage locations. E.g. if we know that path_type is a file path, we can + # search the workspace for it. If it's an import path, we can check the core + # system and the auto_gpt_plugins package. + raise NotImplementedError("Resolving plugin name to path is not implemented.") + + ##################################### + # High-level storage format loaders # + ##################################### + + @staticmethod + def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType": + """Load a plugin from the workspace.""" + plugin = SimplePluginService.load_from_file_path(plugin_route) + return plugin + + @staticmethod + def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType": + plugin = SimplePluginService.load_from_import_path(plugin_route) + return plugin diff --git a/autogpts/autogpt/autogpt/core/poetry.lock b/autogpts/autogpt/autogpt/core/poetry.lock new file mode 100644 index 000000000000..9b3a0ccd1770 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/poetry.lock @@ -0,0 +1,1345 @@ +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. 
+ +[[package]] +name = "agent-protocol" +version = "0.3.0" +description = "API for interacting with Agent" +optional = false +python-versions = ">=3.7,<4.0.0" +files = [ + {file = "agent_protocol-0.3.0-py3-none-any.whl", hash = "sha256:717d0fdad2e105968120fa0a99f0b29e08890951e9cbd74740dd10abf4cfe6dc"}, + {file = "agent_protocol-0.3.0.tar.gz", hash = "sha256:6239820753246bbc69f7f531293b32c69f23284158d58873ee55fe9916cd6028"}, +] + +[package.dependencies] +aiofiles = ">=23.1.0,<24.0.0" +click = ">=8.1.6,<9.0.0" +fastapi = ">=0.100.0,<0.101.0" +hypercorn = ">=0.14.4,<0.15.0" +pydantic = ">=1.10.5,<2.0.0" +pytest = ">=7.0.0,<8.0.0" +python-multipart = ">=0.0.6,<0.0.7" +requests = ">=2.31.0,<3.0.0" + +[[package]] +name = "aiofiles" +version = "23.2.1" +description = "File support for asyncio." +optional = false +python-versions = ">=3.7" +files = [ + {file = "aiofiles-23.2.1-py3-none-any.whl", hash = "sha256:19297512c647d4b27a2cf7c34caa7e405c0d60b5560618a29a9fe027b18b0107"}, + {file = "aiofiles-23.2.1.tar.gz", hash = "sha256:84ec2218d8419404abcb9f0c02df3f34c6e0a68ed41072acfb1cef5cbc29051a"}, +] + +[[package]] +name = "aiohttp" +version = "3.8.5" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "aiohttp-3.8.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a94159871304770da4dd371f4291b20cac04e8c94f11bdea1c3478e557fbe0d8"}, + {file = "aiohttp-3.8.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:13bf85afc99ce6f9ee3567b04501f18f9f8dbbb2ea11ed1a2e079670403a7c84"}, + {file = "aiohttp-3.8.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ce2ac5708501afc4847221a521f7e4b245abf5178cf5ddae9d5b3856ddb2f3a"}, + {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96943e5dcc37a6529d18766597c491798b7eb7a61d48878611298afc1fca946c"}, + {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:2ad5c3c4590bb3cc28b4382f031f3783f25ec223557124c68754a2231d989e2b"}, + {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c413c633d0512df4dc7fd2373ec06cc6a815b7b6d6c2f208ada7e9e93a5061d"}, + {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df72ac063b97837a80d80dec8d54c241af059cc9bb42c4de68bd5b61ceb37caa"}, + {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c48c5c0271149cfe467c0ff8eb941279fd6e3f65c9a388c984e0e6cf57538e14"}, + {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:368a42363c4d70ab52c2c6420a57f190ed3dfaca6a1b19afda8165ee16416a82"}, + {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7607ec3ce4993464368505888af5beb446845a014bc676d349efec0e05085905"}, + {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:0d21c684808288a98914e5aaf2a7c6a3179d4df11d249799c32d1808e79503b5"}, + {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:312fcfbacc7880a8da0ae8b6abc6cc7d752e9caa0051a53d217a650b25e9a691"}, + {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad093e823df03bb3fd37e7dec9d4670c34f9e24aeace76808fc20a507cace825"}, + {file = "aiohttp-3.8.5-cp310-cp310-win32.whl", hash = "sha256:33279701c04351a2914e1100b62b2a7fdb9a25995c4a104259f9a5ead7ed4802"}, + {file = "aiohttp-3.8.5-cp310-cp310-win_amd64.whl", hash = "sha256:6e4a280e4b975a2e7745573e3fc9c9ba0d1194a3738ce1cbaa80626cc9b4f4df"}, + {file = "aiohttp-3.8.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae871a964e1987a943d83d6709d20ec6103ca1eaf52f7e0d36ee1b5bebb8b9b9"}, + {file = "aiohttp-3.8.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:461908b2578955045efde733719d62f2b649c404189a09a632d245b445c9c975"}, + {file = "aiohttp-3.8.5-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:72a860c215e26192379f57cae5ab12b168b75db8271f111019509a1196dfc780"}, + {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc14be025665dba6202b6a71cfcdb53210cc498e50068bc088076624471f8bb9"}, + {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8af740fc2711ad85f1a5c034a435782fbd5b5f8314c9a3ef071424a8158d7f6b"}, + {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:841cd8233cbd2111a0ef0a522ce016357c5e3aff8a8ce92bcfa14cef890d698f"}, + {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ed1c46fb119f1b59304b5ec89f834f07124cd23ae5b74288e364477641060ff"}, + {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84f8ae3e09a34f35c18fa57f015cc394bd1389bce02503fb30c394d04ee6b938"}, + {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62360cb771707cb70a6fd114b9871d20d7dd2163a0feafe43fd115cfe4fe845e"}, + {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:23fb25a9f0a1ca1f24c0a371523546366bb642397c94ab45ad3aedf2941cec6a"}, + {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0ba0d15164eae3d878260d4c4df859bbdc6466e9e6689c344a13334f988bb53"}, + {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5d20003b635fc6ae3f96d7260281dfaf1894fc3aa24d1888a9b2628e97c241e5"}, + {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0175d745d9e85c40dcc51c8f88c74bfbaef9e7afeeeb9d03c37977270303064c"}, + {file = "aiohttp-3.8.5-cp311-cp311-win32.whl", hash = "sha256:2e1b1e51b0774408f091d268648e3d57f7260c1682e7d3a63cb00d22d71bb945"}, + {file = "aiohttp-3.8.5-cp311-cp311-win_amd64.whl", hash = "sha256:043d2299f6dfdc92f0ac5e995dfc56668e1587cea7f9aa9d8a78a1b6554e5755"}, + {file = 
"aiohttp-3.8.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cae533195e8122584ec87531d6df000ad07737eaa3c81209e85c928854d2195c"}, + {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f21e83f355643c345177a5d1d8079f9f28b5133bcd154193b799d380331d5d3"}, + {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7a75ef35f2df54ad55dbf4b73fe1da96f370e51b10c91f08b19603c64004acc"}, + {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e2e9839e14dd5308ee773c97115f1e0a1cb1d75cbeeee9f33824fa5144c7634"}, + {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44e65da1de4403d0576473e2344828ef9c4c6244d65cf4b75549bb46d40b8dd"}, + {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78d847e4cde6ecc19125ccbc9bfac4a7ab37c234dd88fbb3c5c524e8e14da543"}, + {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:c7a815258e5895d8900aec4454f38dca9aed71085f227537208057853f9d13f2"}, + {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:8b929b9bd7cd7c3939f8bcfffa92fae7480bd1aa425279d51a89327d600c704d"}, + {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:5db3a5b833764280ed7618393832e0853e40f3d3e9aa128ac0ba0f8278d08649"}, + {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:a0215ce6041d501f3155dc219712bc41252d0ab76474615b9700d63d4d9292af"}, + {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:fd1ed388ea7fbed22c4968dd64bab0198de60750a25fe8c0c9d4bef5abe13824"}, + {file = "aiohttp-3.8.5-cp36-cp36m-win32.whl", hash = "sha256:6e6783bcc45f397fdebc118d772103d751b54cddf5b60fbcc958382d7dd64f3e"}, + {file = "aiohttp-3.8.5-cp36-cp36m-win_amd64.whl", hash = 
"sha256:b5411d82cddd212644cf9360879eb5080f0d5f7d809d03262c50dad02f01421a"}, + {file = "aiohttp-3.8.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:01d4c0c874aa4ddfb8098e85d10b5e875a70adc63db91f1ae65a4b04d3344cda"}, + {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5980a746d547a6ba173fd5ee85ce9077e72d118758db05d229044b469d9029a"}, + {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a482e6da906d5e6e653be079b29bc173a48e381600161c9932d89dfae5942ef"}, + {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80bd372b8d0715c66c974cf57fe363621a02f359f1ec81cba97366948c7fc873"}, + {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1161b345c0a444ebcf46bf0a740ba5dcf50612fd3d0528883fdc0eff578006a"}, + {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd56db019015b6acfaaf92e1ac40eb8434847d9bf88b4be4efe5bfd260aee692"}, + {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:153c2549f6c004d2754cc60603d4668899c9895b8a89397444a9c4efa282aaf4"}, + {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4a01951fabc4ce26ab791da5f3f24dca6d9a6f24121746eb19756416ff2d881b"}, + {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bfb9162dcf01f615462b995a516ba03e769de0789de1cadc0f916265c257e5d8"}, + {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:7dde0009408969a43b04c16cbbe252c4f5ef4574ac226bc8815cd7342d2028b6"}, + {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4149d34c32f9638f38f544b3977a4c24052042affa895352d3636fa8bffd030a"}, + {file = "aiohttp-3.8.5-cp37-cp37m-win32.whl", hash = "sha256:68c5a82c8779bdfc6367c967a4a1b2aa52cd3595388bf5961a62158ee8a59e22"}, + {file = 
"aiohttp-3.8.5-cp37-cp37m-win_amd64.whl", hash = "sha256:2cf57fb50be5f52bda004b8893e63b48530ed9f0d6c96c84620dc92fe3cd9b9d"}, + {file = "aiohttp-3.8.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:eca4bf3734c541dc4f374ad6010a68ff6c6748f00451707f39857f429ca36ced"}, + {file = "aiohttp-3.8.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1274477e4c71ce8cfe6c1ec2f806d57c015ebf84d83373676036e256bc55d690"}, + {file = "aiohttp-3.8.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:28c543e54710d6158fc6f439296c7865b29e0b616629767e685a7185fab4a6b9"}, + {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910bec0c49637d213f5d9877105d26e0c4a4de2f8b1b29405ff37e9fc0ad52b8"}, + {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5443910d662db951b2e58eb70b0fbe6b6e2ae613477129a5805d0b66c54b6cb7"}, + {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e460be6978fc24e3df83193dc0cc4de46c9909ed92dd47d349a452ef49325b7"}, + {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb1558def481d84f03b45888473fc5a1f35747b5f334ef4e7a571bc0dfcb11f8"}, + {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34dd0c107799dcbbf7d48b53be761a013c0adf5571bf50c4ecad5643fe9cfcd0"}, + {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aa1990247f02a54185dc0dff92a6904521172a22664c863a03ff64c42f9b5410"}, + {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0e584a10f204a617d71d359fe383406305a4b595b333721fa50b867b4a0a1548"}, + {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a3cf433f127efa43fee6b90ea4c6edf6c4a17109d1d037d1a52abec84d8f2e42"}, + {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:c11f5b099adafb18e65c2c997d57108b5bbeaa9eeee64a84302c0978b1ec948b"}, + {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:84de26ddf621d7ac4c975dbea4c945860e08cccde492269db4e1538a6a6f3c35"}, + {file = "aiohttp-3.8.5-cp38-cp38-win32.whl", hash = "sha256:ab88bafedc57dd0aab55fa728ea10c1911f7e4d8b43e1d838a1739f33712921c"}, + {file = "aiohttp-3.8.5-cp38-cp38-win_amd64.whl", hash = "sha256:5798a9aad1879f626589f3df0f8b79b3608a92e9beab10e5fda02c8a2c60db2e"}, + {file = "aiohttp-3.8.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a6ce61195c6a19c785df04e71a4537e29eaa2c50fe745b732aa937c0c77169f3"}, + {file = "aiohttp-3.8.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:773dd01706d4db536335fcfae6ea2440a70ceb03dd3e7378f3e815b03c97ab51"}, + {file = "aiohttp-3.8.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f83a552443a526ea38d064588613aca983d0ee0038801bc93c0c916428310c28"}, + {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f7372f7341fcc16f57b2caded43e81ddd18df53320b6f9f042acad41f8e049a"}, + {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea353162f249c8097ea63c2169dd1aa55de1e8fecbe63412a9bc50816e87b761"}, + {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d47ae48db0b2dcf70bc8a3bc72b3de86e2a590fc299fdbbb15af320d2659de"}, + {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d827176898a2b0b09694fbd1088c7a31836d1a505c243811c87ae53a3f6273c1"}, + {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3562b06567c06439d8b447037bb655ef69786c590b1de86c7ab81efe1c9c15d8"}, + {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4e874cbf8caf8959d2adf572a78bba17cb0e9d7e51bb83d86a3697b686a0ab4d"}, + {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:6809a00deaf3810e38c628e9a33271892f815b853605a936e2e9e5129762356c"}, + {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:33776e945d89b29251b33a7e7d006ce86447b2cfd66db5e5ded4e5cd0340585c"}, + {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eaeed7abfb5d64c539e2db173f63631455f1196c37d9d8d873fc316470dfbacd"}, + {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e91d635961bec2d8f19dfeb41a539eb94bd073f075ca6dae6c8dc0ee89ad6f91"}, + {file = "aiohttp-3.8.5-cp39-cp39-win32.whl", hash = "sha256:00ad4b6f185ec67f3e6562e8a1d2b69660be43070bd0ef6fcec5211154c7df67"}, + {file = "aiohttp-3.8.5-cp39-cp39-win_amd64.whl", hash = "sha256:c0a9034379a37ae42dea7ac1e048352d96286626251862e448933c0f59cbd79c"}, + {file = "aiohttp-3.8.5.tar.gz", hash = "sha256:b9552ec52cc147dbf1944ac7ac98af7602e51ea2dcd076ed194ca3c0d1c7d0bc"}, +] + +[package.dependencies] +aiosignal = ">=1.1.2" +async-timeout = ">=4.0.0a3,<5.0" +attrs = ">=17.3.0" +charset-normalizer = ">=2.0,<4.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns", "cchardet"] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "anyio" +version = "4.0.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.8" +files = [ + {file = "anyio-4.0.0-py3-none-any.whl", hash = "sha256:cfdb2b588b9fc25ede96d8db56ed50848b0b649dca3dd1df0b11f683bb9e0b5f"}, + {file = 
"anyio-4.0.0.tar.gz", hash = "sha256:f7ed51751b2c2add651e5747c891b47e26d2a21be5d32d9311dfe9692f3e5d7a"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" + +[package.extras] +doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (>=0.22)"] + +[[package]] +name = "async-timeout" +version = "4.0.3" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, + {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, +] + +[[package]] +name = "attrs" +version = "23.1.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, + {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] + +[[package]] +name = "certifi" +version = "2023.7.22" +description = "Python package for providing Mozilla's CA Bundle." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, + {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.2.0" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"}, + {file = 
"charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"}, + {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"}, + {file = 
"charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"}, + {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346"}, + {file = 
"charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1"}, + {file = "charset_normalizer-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959"}, + {file = 
"charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706"}, + {file = "charset_normalizer-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-win32.whl", hash = "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9"}, + {file = "charset_normalizer-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80"}, + {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"}, +] + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "distro" +version = "1.8.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +files = [ + {file = "distro-1.8.0-py3-none-any.whl", hash = "sha256:99522ca3e365cac527b44bde033f64c6945d90eb9f769703caaec52b09bbd3ff"}, + {file = "distro-1.8.0.tar.gz", hash = "sha256:02e111d1dc6a50abb8eed6bf31c3e48ed8b0830d1ea2a1b78c61765c2513fdd8"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.1.3" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"}, + {file = "exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "fastapi" +version = "0.100.1" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = false +python-versions = ">=3.7" +files = [ + {file = "fastapi-0.100.1-py3-none-any.whl", hash = "sha256:ec6dd52bfc4eff3063cfcd0713b43c87640fefb2687bbbe3d8a08d94049cdf32"}, + {file = "fastapi-0.100.1.tar.gz", hash = "sha256:522700d7a469e4a973d92321ab93312448fbe20fca9c8da97effc7e7bc56df23"}, +] + +[package.dependencies] +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<3.0.0" +starlette = ">=0.27.0,<0.28.0" +typing-extensions = ">=4.5.0" + +[package.extras] +all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous 
(>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] + +[[package]] +name = "frozenlist" +version = "1.4.0" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +files = [ + {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"}, + {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"}, + {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"}, + {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"}, + 
{file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"}, + {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"}, + {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"}, + {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"}, + {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"}, + {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"}, + {file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"}, + {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"}, + {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"}, + {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"}, + {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"}, + {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"}, + {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"}, + {file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"}, + {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"}, + {file = 
"frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"}, + {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"}, + {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"}, + {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"}, + {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"}, + {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"}, + {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"}, + {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"}, + {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"}, + {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"}, + {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"}, + {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"}, + {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = 
"sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"}, + {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"}, + {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"}, +] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "h2" +version = "4.1.0" +description = "HTTP/2 State-Machine based protocol implementation" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "h2-4.1.0-py3-none-any.whl", hash = "sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d"}, + {file = "h2-4.1.0.tar.gz", hash = "sha256:a83aca08fbe7aacb79fec788c9c0bac936343560ed9ec18b82a13a12c28d2abb"}, +] + +[package.dependencies] +hpack = ">=4.0,<5" +hyperframe = ">=6.0,<7" + +[[package]] +name = "hpack" +version = "4.0.0" +description = "Pure-Python HPACK header compression" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "hpack-4.0.0-py3-none-any.whl", hash = "sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c"}, + {file = "hpack-4.0.0.tar.gz", hash = "sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095"}, +] + +[[package]] +name = "hypercorn" +version = "0.14.4" +description = "A ASGI Server based on Hyper libraries and inspired by Gunicorn" +optional = false +python-versions = ">=3.7" +files = [ + {file = "hypercorn-0.14.4-py3-none-any.whl", hash = "sha256:f956200dbf8677684e6e976219ffa6691d6cf795281184b41dbb0b135ab37b8d"}, + {file = 
"hypercorn-0.14.4.tar.gz", hash = "sha256:3fa504efc46a271640023c9b88c3184fd64993f47a282e8ae1a13ccb285c2f67"}, +] + +[package.dependencies] +h11 = "*" +h2 = ">=3.1.0" +priority = "*" +tomli = {version = "*", markers = "python_version < \"3.11\""} +wsproto = ">=0.14.0" + +[package.extras] +docs = ["pydata_sphinx_theme"] +h3 = ["aioquic (>=0.9.0,<1.0)"] +trio = ["exceptiongroup (>=1.1.0)", "trio (>=0.22.0)"] +uvloop = ["uvloop"] + +[[package]] +name = "hyperframe" +version = "6.0.1" +description = "HTTP/2 framing layer for Python" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "hyperframe-6.0.1-py3-none-any.whl", hash = "sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15"}, + {file = "hyperframe-6.0.1.tar.gz", hash = "sha256:ae510046231dc8e9ecb1a6586f63d2347bf4c8905914aa84ba585ae85f28a914"}, +] + +[[package]] +name = "idna" +version = "3.4" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[[package]] +name = "inflection" +version = "0.5.1" +description = "A port of Ruby on Rails inflector to Python" +optional = false +python-versions = ">=3.5" +files = [ + {file = "inflection-0.5.1-py2.py3-none-any.whl", hash = "sha256:f38b2b640938a4f35ade69ac3d053042959b62a0f1076a5bbaa1b9526605a8a2"}, + {file = "inflection-0.5.1.tar.gz", hash = "sha256:1a29730d366e996aaacffb2f1f1cb9593dc38e2ddd30c91250c6dde09ea9b417"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = 
"iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "jsonschema" +version = "4.19.1" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.19.1-py3-none-any.whl", hash = "sha256:cd5f1f9ed9444e554b38ba003af06c0a8c2868131e56bfbef0550fb450c0330e"}, + {file = "jsonschema-4.19.1.tar.gz", hash = "sha256:ec84cc37cfa703ef7cd4928db24f9cb31428a5d0fa77747b8b51a847458e0bbf"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.7.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.7.1-py3-none-any.whl", hash = "sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1"}, + {file = "jsonschema_specifications-2023.7.1.tar.gz", hash = "sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb"}, +] + +[package.dependencies] +referencing = ">=0.28.0" + +[[package]] +name = "multidict" +version = "6.0.4" +description = "multidict implementation" +optional = false +python-versions = ">=3.7" +files = [ + {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, + {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, + {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, + {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, + {file = 
"multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = 
"sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, + {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, + {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, + {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, + {file = 
"multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, + {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, + {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, + {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, + {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, + {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, + {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, + {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, +] + +[[package]] +name = "openai" +version = "0.28.0" +description = "Python client library for the OpenAI API" +optional = false +python-versions = ">=3.7.1" +files = [ + {file = "openai-0.28.0-py3-none-any.whl", hash = "sha256:d207ece78469be5648eb87b825753282225155a29d0eec6e02013ddbf8c31c0c"}, + {file = "openai-0.28.0.tar.gz", hash = "sha256:417b78c4c2864ba696aedaf1ccff77be1f04a581ab1739f0a56e0aae19e5a794"}, +] + +[package.dependencies] +aiohttp = "*" +requests = ">=2.20" +tqdm = "*" + +[package.extras] +datalib = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs 
(>=1.1.0.11)"] +dev = ["black (>=21.6b0,<22.0)", "pytest (==6.*)", "pytest-asyncio", "pytest-mock"] +embeddings = ["matplotlib", "numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "plotly", "scikit-learn (>=1.0.2)", "scipy", "tenacity (>=8.0.1)"] +wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "wandb"] + +[[package]] +name = "packaging" +version = "23.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, + {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, +] + +[[package]] +name = "pluggy" +version = "1.3.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, + {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "priority" +version = "2.0.0" +description = "A pure-Python implementation of the HTTP/2 priority tree" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "priority-2.0.0-py3-none-any.whl", hash = "sha256:6f8eefce5f3ad59baf2c080a664037bb4725cd0a790d53d59ab4059288faf6aa"}, + {file = "priority-2.0.0.tar.gz", hash = "sha256:c965d54f1b8d0d0b19479db3924c7c36cf672dbf2aec92d43fbdaf4492ba18c0"}, +] + +[[package]] +name = "pydantic" +version = "1.10.12" +description = "Data validation and settings management using python type hints" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic-1.10.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:a1fcb59f2f355ec350073af41d927bf83a63b50e640f4dbaa01053a28b7a7718"}, + {file = "pydantic-1.10.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b7ccf02d7eb340b216ec33e53a3a629856afe1c6e0ef91d84a4e6f2fb2ca70fe"}, + {file = "pydantic-1.10.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fb2aa3ab3728d950bcc885a2e9eff6c8fc40bc0b7bb434e555c215491bcf48b"}, + {file = "pydantic-1.10.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:771735dc43cf8383959dc9b90aa281f0b6092321ca98677c5fb6125a6f56d58d"}, + {file = "pydantic-1.10.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ca48477862372ac3770969b9d75f1bf66131d386dba79506c46d75e6b48c1e09"}, + {file = "pydantic-1.10.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a5e7add47a5b5a40c49b3036d464e3c7802f8ae0d1e66035ea16aa5b7a3923ed"}, + {file = "pydantic-1.10.12-cp310-cp310-win_amd64.whl", hash = "sha256:e4129b528c6baa99a429f97ce733fff478ec955513630e61b49804b6cf9b224a"}, + {file = "pydantic-1.10.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b0d191db0f92dfcb1dec210ca244fdae5cbe918c6050b342d619c09d31eea0cc"}, + {file = "pydantic-1.10.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:795e34e6cc065f8f498c89b894a3c6da294a936ee71e644e4bd44de048af1405"}, + {file = "pydantic-1.10.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69328e15cfda2c392da4e713443c7dbffa1505bc9d566e71e55abe14c97ddc62"}, + {file = "pydantic-1.10.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2031de0967c279df0d8a1c72b4ffc411ecd06bac607a212892757db7462fc494"}, + {file = "pydantic-1.10.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ba5b2e6fe6ca2b7e013398bc7d7b170e21cce322d266ffcd57cca313e54fb246"}, + {file = "pydantic-1.10.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:2a7bac939fa326db1ab741c9d7f44c565a1d1e80908b3797f7f81a4f86bc8d33"}, + {file = "pydantic-1.10.12-cp311-cp311-win_amd64.whl", hash = "sha256:87afda5539d5140cb8ba9e8b8c8865cb5b1463924d38490d73d3ccfd80896b3f"}, + {file = "pydantic-1.10.12-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:549a8e3d81df0a85226963611950b12d2d334f214436a19537b2efed61b7639a"}, + {file = "pydantic-1.10.12-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:598da88dfa127b666852bef6d0d796573a8cf5009ffd62104094a4fe39599565"}, + {file = "pydantic-1.10.12-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba5c4a8552bff16c61882db58544116d021d0b31ee7c66958d14cf386a5b5350"}, + {file = "pydantic-1.10.12-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c79e6a11a07da7374f46970410b41d5e266f7f38f6a17a9c4823db80dadf4303"}, + {file = "pydantic-1.10.12-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab26038b8375581dc832a63c948f261ae0aa21f1d34c1293469f135fa92972a5"}, + {file = "pydantic-1.10.12-cp37-cp37m-win_amd64.whl", hash = "sha256:e0a16d274b588767602b7646fa05af2782576a6cf1022f4ba74cbb4db66f6ca8"}, + {file = "pydantic-1.10.12-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6a9dfa722316f4acf4460afdf5d41d5246a80e249c7ff475c43a3a1e9d75cf62"}, + {file = "pydantic-1.10.12-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a73f489aebd0c2121ed974054cb2759af8a9f747de120acd2c3394cf84176ccb"}, + {file = "pydantic-1.10.12-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b30bcb8cbfccfcf02acb8f1a261143fab622831d9c0989707e0e659f77a18e0"}, + {file = "pydantic-1.10.12-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fcfb5296d7877af406ba1547dfde9943b1256d8928732267e2653c26938cd9c"}, + {file = "pydantic-1.10.12-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2f9a6fab5f82ada41d56b0602606a5506aab165ca54e52bc4545028382ef1c5d"}, + {file = 
"pydantic-1.10.12-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dea7adcc33d5d105896401a1f37d56b47d443a2b2605ff8a969a0ed5543f7e33"}, + {file = "pydantic-1.10.12-cp38-cp38-win_amd64.whl", hash = "sha256:1eb2085c13bce1612da8537b2d90f549c8cbb05c67e8f22854e201bde5d98a47"}, + {file = "pydantic-1.10.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ef6c96b2baa2100ec91a4b428f80d8f28a3c9e53568219b6c298c1125572ebc6"}, + {file = "pydantic-1.10.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c076be61cd0177a8433c0adcb03475baf4ee91edf5a4e550161ad57fc90f523"}, + {file = "pydantic-1.10.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d5a58feb9a39f481eda4d5ca220aa8b9d4f21a41274760b9bc66bfd72595b86"}, + {file = "pydantic-1.10.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5f805d2d5d0a41633651a73fa4ecdd0b3d7a49de4ec3fadf062fe16501ddbf1"}, + {file = "pydantic-1.10.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1289c180abd4bd4555bb927c42ee42abc3aee02b0fb2d1223fb7c6e5bef87dbe"}, + {file = "pydantic-1.10.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5d1197e462e0364906cbc19681605cb7c036f2475c899b6f296104ad42b9f5fb"}, + {file = "pydantic-1.10.12-cp39-cp39-win_amd64.whl", hash = "sha256:fdbdd1d630195689f325c9ef1a12900524dceb503b00a987663ff4f58669b93d"}, + {file = "pydantic-1.10.12-py3-none-any.whl", hash = "sha256:b749a43aa51e32839c9d71dc67eb1e4221bb04af1033a32e3923d46f9effa942"}, + {file = "pydantic-1.10.12.tar.gz", hash = "sha256:0fe8a415cea8f340e7a9af9c54fc71a649b43e8ca3cc732986116b3cb135d303"}, +] + +[package.dependencies] +typing-extensions = ">=4.2.0" + +[package.extras] +dotenv = ["python-dotenv (>=0.10.4)"] +email = ["email-validator (>=1.0.3)"] + +[[package]] +name = "pytest" +version = "7.4.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.4.2-py3-none-any.whl", hash = 
"sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002"}, + {file = "pytest-7.4.2.tar.gz", hash = "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "python-multipart" +version = "0.0.6" +description = "A streaming multipart parser for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "python_multipart-0.0.6-py3-none-any.whl", hash = "sha256:ee698bab5ef148b0a760751c261902cd096e57e10558e11aca17646b74ee1c18"}, + {file = "python_multipart-0.0.6.tar.gz", hash = "sha256:e9925a80bb668529f1b67c7fdb0a5dacdd7cbfc6fb0bff3ea443fe22bdd62132"}, +] + +[package.extras] +dev = ["atomicwrites (==1.2.1)", "attrs (==19.2.0)", "coverage (==6.5.0)", "hatch", "invoke (==1.7.3)", "more-itertools (==4.3.0)", "pbr (==4.3.0)", "pluggy (==1.0.0)", "py (==1.11.0)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-timeout (==2.1.0)", "pyyaml (==5.1)"] + +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + 
{file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = 
"PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "referencing" +version = "0.30.2" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.30.2-py3-none-any.whl", hash = "sha256:449b6669b6121a9e96a7f9e410b245d471e8d48964c67113ce9afe50c8dd7bdf"}, + {file = "referencing-0.30.2.tar.gz", hash = "sha256:794ad8003c65938edcdbc027f1933215e0d0ccc0291e3ce20a4d87432b59efc0"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + +[[package]] +name = "regex" +version = "2023.8.8" +description = "Alternative regular expression module, to replace re." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "regex-2023.8.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:88900f521c645f784260a8d346e12a1590f79e96403971241e64c3a265c8ecdb"}, + {file = "regex-2023.8.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3611576aff55918af2697410ff0293d6071b7e00f4b09e005d614686ac4cd57c"}, + {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8a0ccc8f2698f120e9e5742f4b38dc944c38744d4bdfc427616f3a163dd9de5"}, + {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c662a4cbdd6280ee56f841f14620787215a171c4e2d1744c9528bed8f5816c96"}, + {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf0633e4a1b667bfe0bb10b5e53fe0d5f34a6243ea2530eb342491f1adf4f739"}, + {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:551ad543fa19e94943c5b2cebc54c73353ffff08228ee5f3376bd27b3d5b9800"}, + {file = "regex-2023.8.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54de2619f5ea58474f2ac211ceea6b615af2d7e4306220d4f3fe690c91988a61"}, + {file = "regex-2023.8.8-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ec4b3f0aebbbe2fc0134ee30a791af522a92ad9f164858805a77442d7d18570"}, + {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ae646c35cb9f820491760ac62c25b6d6b496757fda2d51be429e0e7b67ae0ab"}, + {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ca339088839582d01654e6f83a637a4b8194d0960477b9769d2ff2cfa0fa36d2"}, + {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:d9b6627408021452dcd0d2cdf8da0534e19d93d070bfa8b6b4176f99711e7f90"}, + {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_s390x.whl", hash = 
"sha256:bd3366aceedf274f765a3a4bc95d6cd97b130d1dda524d8f25225d14123c01db"}, + {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7aed90a72fc3654fba9bc4b7f851571dcc368120432ad68b226bd593f3f6c0b7"}, + {file = "regex-2023.8.8-cp310-cp310-win32.whl", hash = "sha256:80b80b889cb767cc47f31d2b2f3dec2db8126fbcd0cff31b3925b4dc6609dcdb"}, + {file = "regex-2023.8.8-cp310-cp310-win_amd64.whl", hash = "sha256:b82edc98d107cbc7357da7a5a695901b47d6eb0420e587256ba3ad24b80b7d0b"}, + {file = "regex-2023.8.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1e7d84d64c84ad97bf06f3c8cb5e48941f135ace28f450d86af6b6512f1c9a71"}, + {file = "regex-2023.8.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ce0f9fbe7d295f9922c0424a3637b88c6c472b75eafeaff6f910494a1fa719ef"}, + {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06c57e14ac723b04458df5956cfb7e2d9caa6e9d353c0b4c7d5d54fcb1325c46"}, + {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7a9aaa5a1267125eef22cef3b63484c3241aaec6f48949b366d26c7250e0357"}, + {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b7408511fca48a82a119d78a77c2f5eb1b22fe88b0d2450ed0756d194fe7a9a"}, + {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14dc6f2d88192a67d708341f3085df6a4f5a0c7b03dec08d763ca2cd86e9f559"}, + {file = "regex-2023.8.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48c640b99213643d141550326f34f0502fedb1798adb3c9eb79650b1ecb2f177"}, + {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0085da0f6c6393428bf0d9c08d8b1874d805bb55e17cb1dfa5ddb7cfb11140bf"}, + {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:964b16dcc10c79a4a2be9f1273fcc2684a9eedb3906439720598029a797b46e6"}, + {file = 
"regex-2023.8.8-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7ce606c14bb195b0e5108544b540e2c5faed6843367e4ab3deb5c6aa5e681208"}, + {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:40f029d73b10fac448c73d6eb33d57b34607f40116e9f6e9f0d32e9229b147d7"}, + {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3b8e6ea6be6d64104d8e9afc34c151926f8182f84e7ac290a93925c0db004bfd"}, + {file = "regex-2023.8.8-cp311-cp311-win32.whl", hash = "sha256:942f8b1f3b223638b02df7df79140646c03938d488fbfb771824f3d05fc083a8"}, + {file = "regex-2023.8.8-cp311-cp311-win_amd64.whl", hash = "sha256:51d8ea2a3a1a8fe4f67de21b8b93757005213e8ac3917567872f2865185fa7fb"}, + {file = "regex-2023.8.8-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e951d1a8e9963ea51efd7f150450803e3b95db5939f994ad3d5edac2b6f6e2b4"}, + {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704f63b774218207b8ccc6c47fcef5340741e5d839d11d606f70af93ee78e4d4"}, + {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22283c769a7b01c8ac355d5be0715bf6929b6267619505e289f792b01304d898"}, + {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91129ff1bb0619bc1f4ad19485718cc623a2dc433dff95baadbf89405c7f6b57"}, + {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de35342190deb7b866ad6ba5cbcccb2d22c0487ee0cbb251efef0843d705f0d4"}, + {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b993b6f524d1e274a5062488a43e3f9f8764ee9745ccd8e8193df743dbe5ee61"}, + {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3026cbcf11d79095a32d9a13bbc572a458727bd5b1ca332df4a79faecd45281c"}, + {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_aarch64.whl", 
hash = "sha256:293352710172239bf579c90a9864d0df57340b6fd21272345222fb6371bf82b3"}, + {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d909b5a3fff619dc7e48b6b1bedc2f30ec43033ba7af32f936c10839e81b9217"}, + {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:3d370ff652323c5307d9c8e4c62efd1956fb08051b0e9210212bc51168b4ff56"}, + {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:b076da1ed19dc37788f6a934c60adf97bd02c7eea461b73730513921a85d4235"}, + {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:e9941a4ada58f6218694f382e43fdd256e97615db9da135e77359da257a7168b"}, + {file = "regex-2023.8.8-cp36-cp36m-win32.whl", hash = "sha256:a8c65c17aed7e15a0c824cdc63a6b104dfc530f6fa8cb6ac51c437af52b481c7"}, + {file = "regex-2023.8.8-cp36-cp36m-win_amd64.whl", hash = "sha256:aadf28046e77a72f30dcc1ab185639e8de7f4104b8cb5c6dfa5d8ed860e57236"}, + {file = "regex-2023.8.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:423adfa872b4908843ac3e7a30f957f5d5282944b81ca0a3b8a7ccbbfaa06103"}, + {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ae594c66f4a7e1ea67232a0846649a7c94c188d6c071ac0210c3e86a5f92109"}, + {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e51c80c168074faa793685656c38eb7a06cbad7774c8cbc3ea05552d615393d8"}, + {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09b7f4c66aa9d1522b06e31a54f15581c37286237208df1345108fcf4e050c18"}, + {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e73e5243af12d9cd6a9d6a45a43570dbe2e5b1cdfc862f5ae2b031e44dd95a8"}, + {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:941460db8fe3bd613db52f05259c9336f5a47ccae7d7def44cc277184030a116"}, + {file = 
"regex-2023.8.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f0ccf3e01afeb412a1a9993049cb160d0352dba635bbca7762b2dc722aa5742a"}, + {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:2e9216e0d2cdce7dbc9be48cb3eacb962740a09b011a116fd7af8c832ab116ca"}, + {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5cd9cd7170459b9223c5e592ac036e0704bee765706445c353d96f2890e816c8"}, + {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:4873ef92e03a4309b3ccd8281454801b291b689f6ad45ef8c3658b6fa761d7ac"}, + {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:239c3c2a339d3b3ddd51c2daef10874410917cd2b998f043c13e2084cb191684"}, + {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1005c60ed7037be0d9dea1f9c53cc42f836188227366370867222bda4c3c6bd7"}, + {file = "regex-2023.8.8-cp37-cp37m-win32.whl", hash = "sha256:e6bd1e9b95bc5614a7a9c9c44fde9539cba1c823b43a9f7bc11266446dd568e3"}, + {file = "regex-2023.8.8-cp37-cp37m-win_amd64.whl", hash = "sha256:9a96edd79661e93327cfeac4edec72a4046e14550a1d22aa0dd2e3ca52aec921"}, + {file = "regex-2023.8.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f2181c20ef18747d5f4a7ea513e09ea03bdd50884a11ce46066bb90fe4213675"}, + {file = "regex-2023.8.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a2ad5add903eb7cdde2b7c64aaca405f3957ab34f16594d2b78d53b8b1a6a7d6"}, + {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9233ac249b354c54146e392e8a451e465dd2d967fc773690811d3a8c240ac601"}, + {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:920974009fb37b20d32afcdf0227a2e707eb83fe418713f7a8b7de038b870d0b"}, + {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd2b6c5dfe0929b6c23dde9624483380b170b6e34ed79054ad131b20203a1a63"}, + 
{file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96979d753b1dc3b2169003e1854dc67bfc86edf93c01e84757927f810b8c3c93"}, + {file = "regex-2023.8.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ae54a338191e1356253e7883d9d19f8679b6143703086245fb14d1f20196be9"}, + {file = "regex-2023.8.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2162ae2eb8b079622176a81b65d486ba50b888271302190870b8cc488587d280"}, + {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c884d1a59e69e03b93cf0dfee8794c63d7de0ee8f7ffb76e5f75be8131b6400a"}, + {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf9273e96f3ee2ac89ffcb17627a78f78e7516b08f94dc435844ae72576a276e"}, + {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:83215147121e15d5f3a45d99abeed9cf1fe16869d5c233b08c56cdf75f43a504"}, + {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3f7454aa427b8ab9101f3787eb178057c5250478e39b99540cfc2b889c7d0586"}, + {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f0640913d2c1044d97e30d7c41728195fc37e54d190c5385eacb52115127b882"}, + {file = "regex-2023.8.8-cp38-cp38-win32.whl", hash = "sha256:0c59122ceccb905a941fb23b087b8eafc5290bf983ebcb14d2301febcbe199c7"}, + {file = "regex-2023.8.8-cp38-cp38-win_amd64.whl", hash = "sha256:c12f6f67495ea05c3d542d119d270007090bad5b843f642d418eb601ec0fa7be"}, + {file = "regex-2023.8.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:82cd0a69cd28f6cc3789cc6adeb1027f79526b1ab50b1f6062bbc3a0ccb2dbc3"}, + {file = "regex-2023.8.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bb34d1605f96a245fc39790a117ac1bac8de84ab7691637b26ab2c5efb8f228c"}, + {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:987b9ac04d0b38ef4f89fbc035e84a7efad9cdd5f1e29024f9289182c8d99e09"}, + {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9dd6082f4e2aec9b6a0927202c85bc1b09dcab113f97265127c1dc20e2e32495"}, + {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7eb95fe8222932c10d4436e7a6f7c99991e3fdd9f36c949eff16a69246dee2dc"}, + {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7098c524ba9f20717a56a8d551d2ed491ea89cbf37e540759ed3b776a4f8d6eb"}, + {file = "regex-2023.8.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b694430b3f00eb02c594ff5a16db30e054c1b9589a043fe9174584c6efa8033"}, + {file = "regex-2023.8.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b2aeab3895d778155054abea5238d0eb9a72e9242bd4b43f42fd911ef9a13470"}, + {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:988631b9d78b546e284478c2ec15c8a85960e262e247b35ca5eaf7ee22f6050a"}, + {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:67ecd894e56a0c6108ec5ab1d8fa8418ec0cff45844a855966b875d1039a2e34"}, + {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:14898830f0a0eb67cae2bbbc787c1a7d6e34ecc06fbd39d3af5fe29a4468e2c9"}, + {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:f2200e00b62568cfd920127782c61bc1c546062a879cdc741cfcc6976668dfcf"}, + {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9691a549c19c22d26a4f3b948071e93517bdf86e41b81d8c6ac8a964bb71e5a6"}, + {file = "regex-2023.8.8-cp39-cp39-win32.whl", hash = "sha256:6ab2ed84bf0137927846b37e882745a827458689eb969028af8032b1b3dac78e"}, + {file = "regex-2023.8.8-cp39-cp39-win_amd64.whl", hash = "sha256:5543c055d8ec7801901e1193a51570643d6a6ab8751b1f7dd9af71af467538bb"}, + {file = 
"regex-2023.8.8.tar.gz", hash = "sha256:fcbdc5f2b0f1cd0f6a56cdb46fe41d2cce1e644e3b68832f3eeebc5fb0f7712e"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rpds-py" +version = "0.10.3" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.10.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:485747ee62da83366a44fbba963c5fe017860ad408ccd6cd99aa66ea80d32b2e"}, + {file = "rpds_py-0.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c55f9821f88e8bee4b7a72c82cfb5ecd22b6aad04033334f33c329b29bfa4da0"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3b52a67ac66a3a64a7e710ba629f62d1e26ca0504c29ee8cbd99b97df7079a8"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3aed39db2f0ace76faa94f465d4234aac72e2f32b009f15da6492a561b3bbebd"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:271c360fdc464fe6a75f13ea0c08ddf71a321f4c55fc20a3fe62ea3ef09df7d9"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef5fddfb264e89c435be4adb3953cef5d2936fdeb4463b4161a6ba2f22e7b740"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a771417c9c06c56c9d53d11a5b084d1de75de82978e23c544270ab25e7c066ff"}, + {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:52b5cbc0469328e58180021138207e6ec91d7ca2e037d3549cc9e34e2187330a"}, + {file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6ac3fefb0d168c7c6cab24fdfc80ec62cd2b4dfd9e65b84bdceb1cb01d385c33"}, + {file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:8d54bbdf5d56e2c8cf81a1857250f3ea132de77af543d0ba5dce667183b61fec"}, + {file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cd2163f42868865597d89399a01aa33b7594ce8e2c4a28503127c81a2f17784e"}, + {file = "rpds_py-0.10.3-cp310-none-win32.whl", hash = "sha256:ea93163472db26ac6043e8f7f93a05d9b59e0505c760da2a3cd22c7dd7111391"}, + {file = "rpds_py-0.10.3-cp310-none-win_amd64.whl", hash = "sha256:7cd020b1fb41e3ab7716d4d2c3972d4588fdfbab9bfbbb64acc7078eccef8860"}, + {file = "rpds_py-0.10.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:1d9b5ee46dcb498fa3e46d4dfabcb531e1f2e76b477e0d99ef114f17bbd38453"}, + {file = "rpds_py-0.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:563646d74a4b4456d0cf3b714ca522e725243c603e8254ad85c3b59b7c0c4bf0"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e626b864725680cd3904414d72e7b0bd81c0e5b2b53a5b30b4273034253bb41f"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:485301ee56ce87a51ccb182a4b180d852c5cb2b3cb3a82f7d4714b4141119d8c"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:42f712b4668831c0cd85e0a5b5a308700fe068e37dcd24c0062904c4e372b093"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c9141af27a4e5819d74d67d227d5047a20fa3c7d4d9df43037a955b4c748ec5"}, + {file = 
"rpds_py-0.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef750a20de1b65657a1425f77c525b0183eac63fe7b8f5ac0dd16f3668d3e64f"}, + {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e1a0ffc39f51aa5f5c22114a8f1906b3c17eba68c5babb86c5f77d8b1bba14d1"}, + {file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f4c179a7aeae10ddf44c6bac87938134c1379c49c884529f090f9bf05566c836"}, + {file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:176287bb998fd1e9846a9b666e240e58f8d3373e3bf87e7642f15af5405187b8"}, + {file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6446002739ca29249f0beaaf067fcbc2b5aab4bc7ee8fb941bd194947ce19aff"}, + {file = "rpds_py-0.10.3-cp311-none-win32.whl", hash = "sha256:c7aed97f2e676561416c927b063802c8a6285e9b55e1b83213dfd99a8f4f9e48"}, + {file = "rpds_py-0.10.3-cp311-none-win_amd64.whl", hash = "sha256:8bd01ff4032abaed03f2db702fa9a61078bee37add0bd884a6190b05e63b028c"}, + {file = "rpds_py-0.10.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:4cf0855a842c5b5c391dd32ca273b09e86abf8367572073bd1edfc52bc44446b"}, + {file = "rpds_py-0.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:69b857a7d8bd4f5d6e0db4086da8c46309a26e8cefdfc778c0c5cc17d4b11e08"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:975382d9aa90dc59253d6a83a5ca72e07f4ada3ae3d6c0575ced513db322b8ec"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:35fbd23c1c8732cde7a94abe7fb071ec173c2f58c0bd0d7e5b669fdfc80a2c7b"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:106af1653007cc569d5fbb5f08c6648a49fe4de74c2df814e234e282ebc06957"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:ce5e7504db95b76fc89055c7f41e367eaadef5b1d059e27e1d6eabf2b55ca314"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aca759ada6b1967fcfd4336dcf460d02a8a23e6abe06e90ea7881e5c22c4de6"}, + {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b5d4bdd697195f3876d134101c40c7d06d46c6ab25159ed5cbd44105c715278a"}, + {file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a657250807b6efd19b28f5922520ae002a54cb43c2401e6f3d0230c352564d25"}, + {file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:177c9dd834cdf4dc39c27436ade6fdf9fe81484758885f2d616d5d03c0a83bd2"}, + {file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e22491d25f97199fc3581ad8dd8ce198d8c8fdb8dae80dea3512e1ce6d5fa99f"}, + {file = "rpds_py-0.10.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:2f3e1867dd574014253b4b8f01ba443b9c914e61d45f3674e452a915d6e929a3"}, + {file = "rpds_py-0.10.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c22211c165166de6683de8136229721f3d5c8606cc2c3d1562da9a3a5058049c"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40bc802a696887b14c002edd43c18082cb7b6f9ee8b838239b03b56574d97f71"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e271dd97c7bb8eefda5cca38cd0b0373a1fea50f71e8071376b46968582af9b"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:95cde244e7195b2c07ec9b73fa4c5026d4a27233451485caa1cd0c1b55f26dbd"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08a80cf4884920863623a9ee9a285ee04cef57ebedc1cc87b3e3e0f24c8acfe5"}, + {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763ad59e105fca09705d9f9b29ecffb95ecdc3b0363be3bb56081b2c6de7977a"}, + {file = 
"rpds_py-0.10.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:187700668c018a7e76e89424b7c1042f317c8df9161f00c0c903c82b0a8cac5c"}, + {file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:5267cfda873ad62591b9332fd9472d2409f7cf02a34a9c9cb367e2c0255994bf"}, + {file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:2ed83d53a8c5902ec48b90b2ac045e28e1698c0bea9441af9409fc844dc79496"}, + {file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:255f1a10ae39b52122cce26ce0781f7a616f502feecce9e616976f6a87992d6b"}, + {file = "rpds_py-0.10.3-cp38-none-win32.whl", hash = "sha256:a019a344312d0b1f429c00d49c3be62fa273d4a1094e1b224f403716b6d03be1"}, + {file = "rpds_py-0.10.3-cp38-none-win_amd64.whl", hash = "sha256:efb9ece97e696bb56e31166a9dd7919f8f0c6b31967b454718c6509f29ef6fee"}, + {file = "rpds_py-0.10.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:570cc326e78ff23dec7f41487aa9c3dffd02e5ee9ab43a8f6ccc3df8f9327623"}, + {file = "rpds_py-0.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cff7351c251c7546407827b6a37bcef6416304fc54d12d44dbfecbb717064717"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:177914f81f66c86c012311f8c7f46887ec375cfcfd2a2f28233a3053ac93a569"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:448a66b8266de0b581246ca7cd6a73b8d98d15100fb7165974535fa3b577340e"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bbac1953c17252f9cc675bb19372444aadf0179b5df575ac4b56faaec9f6294"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dd9d9d9e898b9d30683bdd2b6c1849449158647d1049a125879cb397ee9cd12"}, + {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8c71ea77536149e36c4c784f6d420ffd20bea041e3ba21ed021cb40ce58e2c9"}, + {file = 
"rpds_py-0.10.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16a472300bc6c83fe4c2072cc22b3972f90d718d56f241adabc7ae509f53f154"}, + {file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b9255e7165083de7c1d605e818025e8860636348f34a79d84ec533546064f07e"}, + {file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:53d7a3cd46cdc1689296348cb05ffd4f4280035770aee0c8ead3bbd4d6529acc"}, + {file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22da15b902f9f8e267020d1c8bcfc4831ca646fecb60254f7bc71763569f56b1"}, + {file = "rpds_py-0.10.3-cp39-none-win32.whl", hash = "sha256:850c272e0e0d1a5c5d73b1b7871b0a7c2446b304cec55ccdb3eaac0d792bb065"}, + {file = "rpds_py-0.10.3-cp39-none-win_amd64.whl", hash = "sha256:de61e424062173b4f70eec07e12469edde7e17fa180019a2a0d75c13a5c5dc57"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:af247fd4f12cca4129c1b82090244ea5a9d5bb089e9a82feb5a2f7c6a9fe181d"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3ad59efe24a4d54c2742929001f2d02803aafc15d6d781c21379e3f7f66ec842"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642ed0a209ced4be3a46f8cb094f2d76f1f479e2a1ceca6de6346a096cd3409d"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37d0c59548ae56fae01c14998918d04ee0d5d3277363c10208eef8c4e2b68ed6"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aad6ed9e70ddfb34d849b761fb243be58c735be6a9265b9060d6ddb77751e3e8"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8f94fdd756ba1f79f988855d948ae0bad9ddf44df296770d9a58c774cfbcca72"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:77076bdc8776a2b029e1e6ffbe6d7056e35f56f5e80d9dc0bad26ad4a024a762"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87d9b206b1bd7a0523375dc2020a6ce88bca5330682ae2fe25e86fd5d45cea9c"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:8efaeb08ede95066da3a3e3c420fcc0a21693fcd0c4396d0585b019613d28515"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a4d9bfda3f84fc563868fe25ca160c8ff0e69bc4443c5647f960d59400ce6557"}, + {file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:d27aa6bbc1f33be920bb7adbb95581452cdf23005d5611b29a12bb6a3468cc95"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ed8313809571a5463fd7db43aaca68ecb43ca7a58f5b23b6e6c6c5d02bdc7882"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:e10e6a1ed2b8661201e79dff5531f8ad4cdd83548a0f81c95cf79b3184b20c33"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:015de2ce2af1586ff5dc873e804434185199a15f7d96920ce67e50604592cae9"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae87137951bb3dc08c7d8bfb8988d8c119f3230731b08a71146e84aaa919a7a9"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0bb4f48bd0dd18eebe826395e6a48b7331291078a879295bae4e5d053be50d4c"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09362f86ec201288d5687d1dc476b07bf39c08478cde837cb710b302864e7ec9"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821392559d37759caa67d622d0d2994c7a3f2fb29274948ac799d496d92bca73"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:7170cbde4070dc3c77dec82abf86f3b210633d4f89550fa0ad2d4b549a05572a"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:5de11c041486681ce854c814844f4ce3282b6ea1656faae19208ebe09d31c5b8"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:4ed172d0c79f156c1b954e99c03bc2e3033c17efce8dd1a7c781bc4d5793dfac"}, + {file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:11fdd1192240dda8d6c5d18a06146e9045cb7e3ba7c06de6973000ff035df7c6"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:f602881d80ee4228a2355c68da6b296a296cd22bbb91e5418d54577bbf17fa7c"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:691d50c99a937709ac4c4cd570d959a006bd6a6d970a484c84cc99543d4a5bbb"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24cd91a03543a0f8d09cb18d1cb27df80a84b5553d2bd94cba5979ef6af5c6e7"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fc2200e79d75b5238c8d69f6a30f8284290c777039d331e7340b6c17cad24a5a"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea65b59882d5fa8c74a23f8960db579e5e341534934f43f3b18ec1839b893e41"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:829e91f3a8574888b73e7a3feb3b1af698e717513597e23136ff4eba0bc8387a"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eab75a8569a095f2ad470b342f2751d9902f7944704f0571c8af46bede438475"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:061c3ff1f51ecec256e916cf71cc01f9975af8fb3af9b94d3c0cc8702cfea637"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = 
"sha256:39d05e65f23a0fe897b6ac395f2a8d48c56ac0f583f5d663e0afec1da89b95da"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:4eca20917a06d2fca7628ef3c8b94a8c358f6b43f1a621c9815243462dcccf97"}, + {file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e8d0f0eca087630d58b8c662085529781fd5dc80f0a54eda42d5c9029f812599"}, + {file = "rpds_py-0.10.3.tar.gz", hash = "sha256:fcc1ebb7561a3e24a6588f7c6ded15d80aec22c66a070c757559b57b17ffd1cb"}, +] + +[[package]] +name = "sniffio" +version = "1.3.0" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, + {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, +] + +[[package]] +name = "starlette" +version = "0.27.0" +description = "The little ASGI library that shines." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"}, + {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"}, +] + +[package.dependencies] +anyio = ">=3.4.0,<5" + +[package.extras] +full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"] + +[[package]] +name = "tiktoken" +version = "0.5.1" +description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tiktoken-0.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2b0bae3fd56de1c0a5874fb6577667a3c75bf231a6cef599338820210c16e40a"}, + {file = "tiktoken-0.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e529578d017045e2f0ed12d2e00e7e99f780f477234da4aae799ec4afca89f37"}, + {file = "tiktoken-0.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edd2ffbb789712d83fee19ab009949f998a35c51ad9f9beb39109357416344ff"}, + {file = "tiktoken-0.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4c73d47bdc1a3f1f66ffa019af0386c48effdc6e8797e5e76875f6388ff72e9"}, + {file = "tiktoken-0.5.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:46b8554b9f351561b1989157c6bb54462056f3d44e43aa4e671367c5d62535fc"}, + {file = "tiktoken-0.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:92ed3bbf71a175a6a4e5fbfcdb2c422bdd72d9b20407e00f435cf22a68b4ea9b"}, + {file = "tiktoken-0.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:714efb2f4a082635d9f5afe0bf7e62989b72b65ac52f004eb7ac939f506c03a4"}, + {file = "tiktoken-0.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a10488d1d1a5f9c9d2b2052fdb4cf807bba545818cb1ef724a7f5d44d9f7c3d4"}, + {file = "tiktoken-0.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:8079ac065572fe0e7c696dbd63e1fdc12ce4cdca9933935d038689d4732451df"}, + {file = "tiktoken-0.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ef730db4097f5b13df8d960f7fdda2744fe21d203ea2bb80c120bb58661b155"}, + {file = "tiktoken-0.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:426e7def5f3f23645dada816be119fa61e587dfb4755de250e136b47a045c365"}, + {file = "tiktoken-0.5.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:323cec0031358bc09aa965c2c5c1f9f59baf76e5b17e62dcc06d1bb9bc3a3c7c"}, + {file = "tiktoken-0.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5abd9436f02e2c8eda5cce2ff8015ce91f33e782a7423de2a1859f772928f714"}, + {file = "tiktoken-0.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:1fe99953b63aabc0c9536fbc91c3c9000d78e4755edc28cc2e10825372046a2d"}, + {file = "tiktoken-0.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dcdc630461927718b317e6f8be7707bd0fc768cee1fdc78ddaa1e93f4dc6b2b1"}, + {file = "tiktoken-0.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1f2b3b253e22322b7f53a111e1f6d7ecfa199b4f08f3efdeb0480f4033b5cdc6"}, + {file = "tiktoken-0.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43ce0199f315776dec3ea7bf86f35df86d24b6fcde1babd3e53c38f17352442f"}, + {file = "tiktoken-0.5.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a84657c083d458593c0235926b5c993eec0b586a2508d6a2020556e5347c2f0d"}, + {file = "tiktoken-0.5.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c008375c0f3d97c36e81725308699116cd5804fdac0f9b7afc732056329d2790"}, + {file = "tiktoken-0.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:779c4dea5edd1d3178734d144d32231e0b814976bec1ec09636d1003ffe4725f"}, + {file = "tiktoken-0.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:b5dcfcf9bfb798e86fbce76d40a1d5d9e3f92131aecfa3d1e5c9ea1a20f1ef1a"}, + {file = "tiktoken-0.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:9b180a22db0bbcc447f691ffc3cf7a580e9e0587d87379e35e58b826ebf5bc7b"}, + {file = "tiktoken-0.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b756a65d98b7cf760617a6b68762a23ab8b6ef79922be5afdb00f5e8a9f4e76"}, + {file = "tiktoken-0.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba9873c253ca1f670e662192a0afcb72b41e0ba3e730f16c665099e12f4dac2d"}, + {file = "tiktoken-0.5.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74c90d2be0b4c1a2b3f7dde95cd976757817d4df080d6af0ee8d461568c2e2ad"}, + {file = "tiktoken-0.5.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:709a5220891f2b56caad8327fab86281787704931ed484d9548f65598dea9ce4"}, + {file = "tiktoken-0.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5d5a187ff9c786fae6aadd49f47f019ff19e99071dc5b0fe91bfecc94d37c686"}, + {file = "tiktoken-0.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:e21840043dbe2e280e99ad41951c00eff8ee3b63daf57cd4c1508a3fd8583ea2"}, + {file = "tiktoken-0.5.1.tar.gz", hash = "sha256:27e773564232004f4f810fd1f85236673ec3a56ed7f1206fc9ed8670ebedb97a"}, +] + +[package.dependencies] +regex = ">=2022.1.18" +requests = ">=2.26.0" + +[package.extras] +blobfile = ["blobfile (>=2)"] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tqdm" +version = "4.66.1" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"}, + {file = "tqdm-4.66.1.tar.gz", hash = 
"sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "typing-extensions" +version = "4.8.0" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, + {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, +] + +[[package]] +name = "urllib3" +version = "2.0.5" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.7" +files = [ + {file = "urllib3-2.0.5-py3-none-any.whl", hash = "sha256:ef16afa8ba34a1f989db38e1dbbe0c302e4289a47856990d0682e374563ce35e"}, + {file = "urllib3-2.0.5.tar.gz", hash = "sha256:13abf37382ea2ce6fb744d4dad67838eec857c9f4f57009891805e0b5e123594"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "wsproto" +version = "1.2.0" +description = "WebSockets state-machine based protocol implementation" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, + {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, +] + +[package.dependencies] +h11 = ">=0.9.0,<1" + 
+[[package]] +name = "yarl" +version = "1.9.2" +description = "Yet another URL library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"}, + {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"}, + {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"}, + {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = 
"sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"}, + {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"}, + {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"}, + {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"}, + {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"}, + {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"}, + {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"}, + {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"}, + {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"}, + {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"}, + {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"}, + {file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"}, + {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = 
"sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"}, + {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"}, + {file = "yarl-1.9.2-cp37-cp37m-win32.whl", hash = "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"}, + {file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"}, + {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"}, + {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"}, + {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"}, + {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"}, + {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"}, + {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"}, + {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"}, + {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"}, + {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"}, + {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"}, + {file = 
"yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"}, + {file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"}, + {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"}, + {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"}, + {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"}, + {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + +[metadata] +lock-version = "2.0" +python-versions = "^3.10" +content-hash = "e5acc4decd67692ad0f08e38d380e1a474ef480449b78dd14321dccf1ad3ca5a" diff --git a/autogpts/autogpt/autogpt/core/prompting/__init__.py b/autogpts/autogpt/autogpt/core/prompting/__init__.py new file mode 100644 index 000000000000..305c3568564d --- /dev/null +++ b/autogpts/autogpt/autogpt/core/prompting/__init__.py @@ -0,0 +1,8 @@ +from .base import PromptStrategy +from .schema import ChatPrompt, LanguageModelClassification + 
+__all__ = [ + "LanguageModelClassification", + "ChatPrompt", + "PromptStrategy", +] diff --git a/autogpts/autogpt/autogpt/core/prompting/base.py b/autogpts/autogpt/autogpt/core/prompting/base.py new file mode 100644 index 000000000000..19e315f6970e --- /dev/null +++ b/autogpts/autogpt/autogpt/core/prompting/base.py @@ -0,0 +1,23 @@ +import abc + +from autogpt.core.configuration import SystemConfiguration +from autogpt.core.resource.model_providers import AssistantChatMessage + +from .schema import ChatPrompt, LanguageModelClassification + + +class PromptStrategy(abc.ABC): + default_configuration: SystemConfiguration + + @property + @abc.abstractmethod + def model_classification(self) -> LanguageModelClassification: + ... + + @abc.abstractmethod + def build_prompt(self, *_, **kwargs) -> ChatPrompt: + ... + + @abc.abstractmethod + def parse_response_content(self, response_content: AssistantChatMessage): + ... diff --git a/autogpts/autogpt/autogpt/core/prompting/schema.py b/autogpts/autogpt/autogpt/core/prompting/schema.py new file mode 100644 index 000000000000..45efc40fea8b --- /dev/null +++ b/autogpts/autogpt/autogpt/core/prompting/schema.py @@ -0,0 +1,34 @@ +import enum + +from pydantic import BaseModel, Field + +from autogpt.core.resource.model_providers.schema import ( + ChatMessage, + ChatMessageDict, + CompletionModelFunction, +) + + +class LanguageModelClassification(str, enum.Enum): + """The LanguageModelClassification is a functional description of the model. + + This is used to determine what kind of model to use for a given prompt. + Sometimes we prefer a faster or cheaper model to accomplish a task when + possible. 
+ """ + + FAST_MODEL = "fast_model" + SMART_MODEL = "smart_model" + + +class ChatPrompt(BaseModel): + messages: list[ChatMessage] + functions: list[CompletionModelFunction] = Field(default_factory=list) + + def raw(self) -> list[ChatMessageDict]: + return [m.dict() for m in self.messages] + + def __str__(self): + return "\n\n".join( + f"{m.role.value.upper()}: {m.content}" for m in self.messages + ) diff --git a/autogpts/autogpt/autogpt/core/prompting/utils.py b/autogpts/autogpt/autogpt/core/prompting/utils.py new file mode 100644 index 000000000000..865b3fc081e8 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/prompting/utils.py @@ -0,0 +1,9 @@ +def to_numbered_list( + items: list[str], no_items_response: str = "", **template_args +) -> str: + if items: + return "\n".join( + f"{i+1}. {item.format(**template_args)}" for i, item in enumerate(items) + ) + else: + return no_items_response diff --git a/autogpts/autogpt/autogpt/core/pyproject.toml b/autogpts/autogpt/autogpt/core/pyproject.toml new file mode 100644 index 000000000000..059a6bc76061 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/pyproject.toml @@ -0,0 +1,77 @@ +[tool.poetry] +name = "agpt" +version = "1.0.0" +authors = ["Significant Gravitas "] +maintainers = ["Reinier van der Leer "] +description = "An open-source attempt at an autonomous generalist agent" +readme = "README.md" +repository = "https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpts/agpt" +# documentation = "https://docs.agpt.co/autogpts/agpt" # TODO +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", +] +packages = [{ include = "autogpt/core", from = "../.." 
}] + +[tool.poetry.scripts] +cli = "autogpt.core.runner.cli_app.cli:autogpt" +cli-web = "autogpt.core.runner.cli_web_app.cli:autogpt" + +[tool.poetry.dependencies] +python = "^3.10" +agent-protocol = "^0.3.0" +click = "^8.1.7" +colorama = "^0.4.6" +distro = "^1.8.0" +inflection = "^0.5.1" +jsonschema = "^4.19.1" +openai = "^0.28.0" +pydantic = "^1.10.12" +pyyaml = "^6.0.0" +tiktoken = "^0.5.1" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + + +[tool.black] +line-length = 88 +target-version = ['py310'] +include = '\.pyi?$' +packages = ["autogpt"] +extend-exclude = '.+/(dist|.venv|venv|build)/.+' + +[tool.isort] +profile = "black" +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true +ensure_newline_before_comments = true +line_length = 88 +sections = [ + "FUTURE", + "STDLIB", + "THIRDPARTY", + "FIRSTPARTY", + "LOCALFOLDER" +] +skip = ''' + .tox + __pycache__ + *.pyc + .env + venv*/* + .venv/* + reports/* + dist/* + +''' + +[tool.pytest.ini_options] +markers = [ + "requires_openai_api_key", + "requires_huggingface_api_key" +] diff --git a/autogpts/autogpt/autogpt/core/resource/__init__.py b/autogpts/autogpt/autogpt/core/resource/__init__.py new file mode 100644 index 000000000000..897e08777e2d --- /dev/null +++ b/autogpts/autogpt/autogpt/core/resource/__init__.py @@ -0,0 +1,15 @@ +from autogpt.core.resource.schema import ( + ProviderBudget, + ProviderCredentials, + ProviderSettings, + ProviderUsage, + ResourceType, +) + +__all__ = [ + "ProviderBudget", + "ProviderCredentials", + "ProviderSettings", + "ProviderUsage", + "ResourceType", +] diff --git a/autogpts/autogpt/autogpt/core/resource/model_providers/__init__.py b/autogpts/autogpt/autogpt/core/resource/model_providers/__init__.py new file mode 100644 index 000000000000..b896760d20ba --- /dev/null +++ b/autogpts/autogpt/autogpt/core/resource/model_providers/__init__.py @@ -0,0 +1,65 @@ +from .openai import ( + 
OPEN_AI_CHAT_MODELS, + OPEN_AI_EMBEDDING_MODELS, + OPEN_AI_MODELS, + OpenAIModelName, + OpenAIProvider, + OpenAISettings, +) +from .schema import ( + AssistantChatMessage, + AssistantChatMessageDict, + AssistantFunctionCall, + AssistantFunctionCallDict, + ChatMessage, + ChatModelInfo, + ChatModelProvider, + ChatModelResponse, + CompletionModelFunction, + Embedding, + EmbeddingModelInfo, + EmbeddingModelProvider, + EmbeddingModelResponse, + ModelInfo, + ModelProvider, + ModelProviderBudget, + ModelProviderCredentials, + ModelProviderName, + ModelProviderService, + ModelProviderSettings, + ModelProviderUsage, + ModelResponse, + ModelTokenizer, +) + +__all__ = [ + "AssistantChatMessage", + "AssistantChatMessageDict", + "AssistantFunctionCall", + "AssistantFunctionCallDict", + "ChatMessage", + "ChatModelInfo", + "ChatModelProvider", + "ChatModelResponse", + "CompletionModelFunction", + "Embedding", + "EmbeddingModelInfo", + "EmbeddingModelProvider", + "EmbeddingModelResponse", + "ModelInfo", + "ModelProvider", + "ModelProviderBudget", + "ModelProviderCredentials", + "ModelProviderName", + "ModelProviderService", + "ModelProviderSettings", + "ModelProviderUsage", + "ModelResponse", + "ModelTokenizer", + "OPEN_AI_MODELS", + "OPEN_AI_CHAT_MODELS", + "OPEN_AI_EMBEDDING_MODELS", + "OpenAIModelName", + "OpenAIProvider", + "OpenAISettings", +] diff --git a/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py b/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py new file mode 100644 index 000000000000..5e94ef337c31 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py @@ -0,0 +1,879 @@ +import enum +import logging +import os +from pathlib import Path +from typing import Any, Callable, Coroutine, Iterator, Optional, ParamSpec, TypeVar + +import sentry_sdk +import tenacity +import tiktoken +import yaml +from openai._exceptions import APIStatusError, RateLimitError +from openai.types import CreateEmbeddingResponse +from 
openai.types.chat import ( + ChatCompletion, + ChatCompletionMessage, + ChatCompletionMessageParam, +) +from pydantic import SecretStr + +from autogpt.core.configuration import Configurable, UserConfigurable +from autogpt.core.resource.model_providers.schema import ( + AssistantChatMessage, + AssistantFunctionCall, + AssistantToolCall, + AssistantToolCallDict, + ChatMessage, + ChatModelInfo, + ChatModelProvider, + ChatModelResponse, + CompletionModelFunction, + Embedding, + EmbeddingModelInfo, + EmbeddingModelProvider, + EmbeddingModelResponse, + ModelProviderBudget, + ModelProviderConfiguration, + ModelProviderCredentials, + ModelProviderName, + ModelProviderSettings, + ModelTokenizer, +) +from autogpt.core.utils.json_schema import JSONSchema +from autogpt.core.utils.json_utils import json_loads + +_T = TypeVar("_T") +_P = ParamSpec("_P") + +OpenAIEmbeddingParser = Callable[[Embedding], Embedding] + + +class OpenAIModelName(str, enum.Enum): + EMBEDDING_v2 = "text-embedding-ada-002" + EMBEDDING_v3_S = "text-embedding-3-small" + EMBEDDING_v3_L = "text-embedding-3-large" + + GPT3_v1 = "gpt-3.5-turbo-0301" + GPT3_v2 = "gpt-3.5-turbo-0613" + GPT3_v2_16k = "gpt-3.5-turbo-16k-0613" + GPT3_v3 = "gpt-3.5-turbo-1106" + GPT3_v4 = "gpt-3.5-turbo-0125" + GPT3_ROLLING = "gpt-3.5-turbo" + GPT3_ROLLING_16k = "gpt-3.5-turbo-16k" + GPT3 = GPT3_ROLLING + GPT3_16k = GPT3_ROLLING_16k + + GPT4_v1 = "gpt-4-0314" + GPT4_v1_32k = "gpt-4-32k-0314" + GPT4_v2 = "gpt-4-0613" + GPT4_v2_32k = "gpt-4-32k-0613" + GPT4_v3 = "gpt-4-1106-preview" + GPT4_v3_VISION = "gpt-4-1106-vision-preview" + GPT4_v4 = "gpt-4-0125-preview" + GPT4_v5 = "gpt-4-turbo-2024-04-09" + GPT4_ROLLING = "gpt-4" + GPT4_ROLLING_32k = "gpt-4-32k" + GPT4_TURBO = "gpt-4-turbo" + GPT4_TURBO_PREVIEW = "gpt-4-turbo-preview" + GPT4_VISION = "gpt-4-vision-preview" + GPT4 = GPT4_ROLLING + GPT4_32k = GPT4_ROLLING_32k + + +OPEN_AI_EMBEDDING_MODELS = { + info.name: info + for info in [ + EmbeddingModelInfo( + 
name=OpenAIModelName.EMBEDDING_v2, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.0001 / 1000, + max_tokens=8191, + embedding_dimensions=1536, + ), + EmbeddingModelInfo( + name=OpenAIModelName.EMBEDDING_v3_S, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.00002 / 1000, + max_tokens=8191, + embedding_dimensions=1536, + ), + EmbeddingModelInfo( + name=OpenAIModelName.EMBEDDING_v3_L, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.00013 / 1000, + max_tokens=8191, + embedding_dimensions=3072, + ), + ] +} + + +OPEN_AI_CHAT_MODELS = { + info.name: info + for info in [ + ChatModelInfo( + name=OpenAIModelName.GPT3_v1, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.0015 / 1000, + completion_token_cost=0.002 / 1000, + max_tokens=4096, + has_function_call_api=True, + ), + ChatModelInfo( + name=OpenAIModelName.GPT3_v2_16k, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.003 / 1000, + completion_token_cost=0.004 / 1000, + max_tokens=16384, + has_function_call_api=True, + ), + ChatModelInfo( + name=OpenAIModelName.GPT3_v3, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.001 / 1000, + completion_token_cost=0.002 / 1000, + max_tokens=16384, + has_function_call_api=True, + ), + ChatModelInfo( + name=OpenAIModelName.GPT3_v4, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.0005 / 1000, + completion_token_cost=0.0015 / 1000, + max_tokens=16384, + has_function_call_api=True, + ), + ChatModelInfo( + name=OpenAIModelName.GPT4_v1, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.03 / 1000, + completion_token_cost=0.06 / 1000, + max_tokens=8191, + has_function_call_api=True, + ), + ChatModelInfo( + name=OpenAIModelName.GPT4_v1_32k, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.06 / 1000, + completion_token_cost=0.12 / 1000, + max_tokens=32768, + has_function_call_api=True, + ), + ChatModelInfo( + name=OpenAIModelName.GPT4_TURBO, + 
provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.01 / 1000, + completion_token_cost=0.03 / 1000, + max_tokens=128000, + has_function_call_api=True, + ), + ] +} +# Copy entries for models with equivalent specs +chat_model_mapping = { + OpenAIModelName.GPT3_v1: [OpenAIModelName.GPT3_v2], + OpenAIModelName.GPT3_v2_16k: [OpenAIModelName.GPT3_16k], + OpenAIModelName.GPT3_v4: [OpenAIModelName.GPT3_ROLLING], + OpenAIModelName.GPT4_v1: [OpenAIModelName.GPT4_v2, OpenAIModelName.GPT4_ROLLING], + OpenAIModelName.GPT4_v1_32k: [ + OpenAIModelName.GPT4_v2_32k, + OpenAIModelName.GPT4_32k, + ], + OpenAIModelName.GPT4_TURBO: [ + OpenAIModelName.GPT4_v3, + OpenAIModelName.GPT4_v3_VISION, + OpenAIModelName.GPT4_VISION, + OpenAIModelName.GPT4_v4, + OpenAIModelName.GPT4_TURBO_PREVIEW, + OpenAIModelName.GPT4_v5, + ], +} +for base, copies in chat_model_mapping.items(): + for copy in copies: + copy_info = OPEN_AI_CHAT_MODELS[base].copy(update={"name": copy}) + OPEN_AI_CHAT_MODELS[copy] = copy_info + if copy.endswith(("-0301", "-0314")): + copy_info.has_function_call_api = False + + +OPEN_AI_MODELS = { + **OPEN_AI_CHAT_MODELS, + **OPEN_AI_EMBEDDING_MODELS, +} + + +class OpenAIConfiguration(ModelProviderConfiguration): + fix_failed_parse_tries: int = UserConfigurable(3) + + +class OpenAICredentials(ModelProviderCredentials): + """Credentials for OpenAI.""" + + api_key: SecretStr = UserConfigurable(from_env="OPENAI_API_KEY") + api_base: Optional[SecretStr] = UserConfigurable( + default=None, from_env="OPENAI_API_BASE_URL" + ) + organization: Optional[SecretStr] = UserConfigurable(from_env="OPENAI_ORGANIZATION") + + api_type: str = UserConfigurable( + default="", + from_env=lambda: ( + "azure" + if os.getenv("USE_AZURE") == "True" + else os.getenv("OPENAI_API_TYPE") + ), + ) + api_version: str = UserConfigurable("", from_env="OPENAI_API_VERSION") + azure_endpoint: Optional[SecretStr] = None + azure_model_to_deploy_id_map: Optional[dict[str, str]] = None + + def 
get_api_access_kwargs(self) -> dict[str, str]: + kwargs = { + k: (v.get_secret_value() if type(v) is SecretStr else v) + for k, v in { + "api_key": self.api_key, + "base_url": self.api_base, + "organization": self.organization, + }.items() + if v is not None + } + if self.api_type == "azure": + kwargs["api_version"] = self.api_version + assert self.azure_endpoint, "Azure endpoint not configured" + kwargs["azure_endpoint"] = self.azure_endpoint.get_secret_value() + return kwargs + + def get_model_access_kwargs(self, model: str) -> dict[str, str]: + kwargs = {"model": model} + if self.api_type == "azure" and model: + azure_kwargs = self._get_azure_access_kwargs(model) + kwargs.update(azure_kwargs) + return kwargs + + def load_azure_config(self, config_file: Path) -> None: + with open(config_file) as file: + config_params = yaml.load(file, Loader=yaml.SafeLoader) or {} + + try: + assert config_params.get( + "azure_model_map", {} + ), "Azure model->deployment_id map is empty" + except AssertionError as e: + raise ValueError(*e.args) + + self.api_type = config_params.get("azure_api_type", "azure") + self.api_version = config_params.get("azure_api_version", "") + self.azure_endpoint = config_params.get("azure_endpoint") + self.azure_model_to_deploy_id_map = config_params.get("azure_model_map") + + def _get_azure_access_kwargs(self, model: str) -> dict[str, str]: + """Get the kwargs for the Azure API.""" + + if not self.azure_model_to_deploy_id_map: + raise ValueError("Azure model deployment map not configured") + + if model not in self.azure_model_to_deploy_id_map: + raise ValueError(f"No Azure deployment ID configured for model '{model}'") + deployment_id = self.azure_model_to_deploy_id_map[model] + + return {"model": deployment_id} + + +class OpenAISettings(ModelProviderSettings): + configuration: OpenAIConfiguration + credentials: Optional[OpenAICredentials] + budget: ModelProviderBudget + + +class OpenAIProvider( + Configurable[OpenAISettings], ChatModelProvider, 
EmbeddingModelProvider +): + default_settings = OpenAISettings( + name="openai_provider", + description="Provides access to OpenAI's API.", + configuration=OpenAIConfiguration( + retries_per_request=7, + ), + credentials=None, + budget=ModelProviderBudget(), + ) + + _configuration: OpenAIConfiguration + _credentials: OpenAICredentials + _budget: ModelProviderBudget + + def __init__( + self, + settings: Optional[OpenAISettings] = None, + logger: Optional[logging.Logger] = None, + ): + if not settings: + settings = self.default_settings.copy(deep=True) + if not settings.credentials: + settings.credentials = OpenAICredentials.from_env() + + self._settings = settings + + self._configuration = settings.configuration + self._credentials = settings.credentials + self._budget = settings.budget + + if self._credentials.api_type == "azure": + from openai import AsyncAzureOpenAI + + # API key and org (if configured) are passed, the rest of the required + # credentials is loaded from the environment by the AzureOpenAI client. 
+ self._client = AsyncAzureOpenAI(**self._credentials.get_api_access_kwargs()) + else: + from openai import AsyncOpenAI + + self._client = AsyncOpenAI(**self._credentials.get_api_access_kwargs()) + + self._logger = logger or logging.getLogger(__name__) + + async def get_available_models(self) -> list[ChatModelInfo]: + _models = (await self._client.models.list()).data + return [OPEN_AI_MODELS[m.id] for m in _models if m.id in OPEN_AI_MODELS] + + def get_token_limit(self, model_name: str) -> int: + """Get the token limit for a given model.""" + return OPEN_AI_MODELS[model_name].max_tokens + + @classmethod + def get_tokenizer(cls, model_name: OpenAIModelName) -> ModelTokenizer: + return tiktoken.encoding_for_model(model_name) + + @classmethod + def count_tokens(cls, text: str, model_name: OpenAIModelName) -> int: + encoding = cls.get_tokenizer(model_name) + return len(encoding.encode(text)) + + @classmethod + def count_message_tokens( + cls, + messages: ChatMessage | list[ChatMessage], + model_name: OpenAIModelName, + ) -> int: + if isinstance(messages, ChatMessage): + messages = [messages] + + if model_name.startswith("gpt-3.5-turbo"): + tokens_per_message = ( + 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n + ) + tokens_per_name = -1 # if there's a name, the role is omitted + encoding_model = "gpt-3.5-turbo" + elif model_name.startswith("gpt-4"): + tokens_per_message = 3 + tokens_per_name = 1 + encoding_model = "gpt-4" + else: + raise NotImplementedError( + f"count_message_tokens() is not implemented for model {model_name}.\n" + " See https://github.com/openai/openai-python/blob/main/chatml.md for" + " information on how messages are converted to tokens." + ) + try: + encoding = tiktoken.encoding_for_model(encoding_model) + except KeyError: + logging.getLogger(__class__.__name__).warning( + f"Model {model_name} not found. Defaulting to cl100k_base encoding." 
+ ) + encoding = tiktoken.get_encoding("cl100k_base") + + num_tokens = 0 + for message in messages: + num_tokens += tokens_per_message + for key, value in message.dict().items(): + num_tokens += len(encoding.encode(value)) + if key == "name": + num_tokens += tokens_per_name + num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> + return num_tokens + + async def create_chat_completion( + self, + model_prompt: list[ChatMessage], + model_name: OpenAIModelName, + completion_parser: Callable[[AssistantChatMessage], _T] = lambda _: None, + functions: Optional[list[CompletionModelFunction]] = None, + max_output_tokens: Optional[int] = None, + **kwargs, + ) -> ChatModelResponse[_T]: + """Create a completion using the OpenAI API.""" + + openai_messages, completion_kwargs = self._get_chat_completion_args( + model_prompt=model_prompt, + model_name=model_name, + functions=functions, + max_tokens=max_output_tokens, + **kwargs, + ) + tool_calls_compat_mode = bool(functions and "tools" not in completion_kwargs) + + total_cost = 0.0 + attempts = 0 + while True: + _response, _cost, t_input, t_output = await self._create_chat_completion( + messages=openai_messages, + **completion_kwargs, + ) + total_cost += _cost + + # If parsing the response fails, append the error to the prompt, and let the + # LLM fix its mistake(s). 
+ attempts += 1 + parse_errors: list[Exception] = [] + + _assistant_msg = _response.choices[0].message + + tool_calls, _errors = self._parse_assistant_tool_calls( + _assistant_msg, tool_calls_compat_mode + ) + parse_errors += _errors + + assistant_msg = AssistantChatMessage( + content=_assistant_msg.content, + tool_calls=tool_calls or None, + ) + + parsed_result: _T = None # type: ignore + if not parse_errors: + try: + parsed_result = completion_parser(assistant_msg) + except Exception as e: + parse_errors.append(e) + + if not parse_errors: + if attempts > 1: + self._logger.debug( + f"Total cost for {attempts} attempts: ${round(total_cost, 5)}" + ) + + return ChatModelResponse( + response=AssistantChatMessage( + content=_assistant_msg.content, + tool_calls=tool_calls or None, + ), + parsed_result=parsed_result, + model_info=OPEN_AI_CHAT_MODELS[model_name], + prompt_tokens_used=t_input, + completion_tokens_used=t_output, + ) + + else: + self._logger.debug( + f"Parsing failed on response: '''{_assistant_msg}'''" + ) + self._logger.warning( + f"Parsing attempt #{attempts} failed: {parse_errors}" + ) + for e in parse_errors: + sentry_sdk.capture_exception( + error=e, + extras={"assistant_msg": _assistant_msg, "i_attempt": attempts}, + ) + + if attempts < self._configuration.fix_failed_parse_tries: + openai_messages.append(_assistant_msg.dict(exclude_none=True)) + openai_messages.append( + { + "role": "system", + "content": ( + "ERROR PARSING YOUR RESPONSE:\n\n" + + "\n\n".join( + f"{e.__class__.__name__}: {e}" for e in parse_errors + ) + ), + } + ) + continue + else: + raise parse_errors[0] + + async def create_embedding( + self, + text: str, + model_name: OpenAIModelName, + embedding_parser: Callable[[Embedding], Embedding], + **kwargs, + ) -> EmbeddingModelResponse: + """Create an embedding using the OpenAI API.""" + embedding_kwargs = self._get_embedding_kwargs(model_name, **kwargs) + response = await self._create_embedding(text=text, **embedding_kwargs) + + 
response = EmbeddingModelResponse( + embedding=embedding_parser(response.data[0].embedding), + model_info=OPEN_AI_EMBEDDING_MODELS[model_name], + prompt_tokens_used=response.usage.prompt_tokens, + completion_tokens_used=0, + ) + self._budget.update_usage_and_cost(response) + return response + + def _get_chat_completion_args( + self, + model_prompt: list[ChatMessage], + model_name: OpenAIModelName, + functions: Optional[list[CompletionModelFunction]] = None, + **kwargs, + ) -> tuple[list[ChatCompletionMessageParam], dict[str, Any]]: + """Prepare chat completion arguments and keyword arguments for API call. + + Args: + model_prompt: List of ChatMessages. + model_name: The model to use. + functions: Optional list of functions available to the LLM. + kwargs: Additional keyword arguments. + + Returns: + list[ChatCompletionMessageParam]: Prompt messages for the OpenAI call + dict[str, Any]: Any other kwargs for the OpenAI call + """ + kwargs.update(self._credentials.get_model_access_kwargs(model_name)) + + if functions: + if OPEN_AI_CHAT_MODELS[model_name].has_function_call_api: + kwargs["tools"] = [ + {"type": "function", "function": f.schema} for f in functions + ] + if len(functions) == 1: + # force the model to call the only specified function + kwargs["tool_choice"] = { + "type": "function", + "function": {"name": functions[0].name}, + } + else: + # Provide compatibility with older models + _functions_compat_fix_kwargs(functions, kwargs) + + if extra_headers := self._configuration.extra_request_headers: + kwargs["extra_headers"] = kwargs.get("extra_headers", {}) + kwargs["extra_headers"].update(extra_headers.copy()) + + if "messages" in kwargs: + model_prompt += kwargs["messages"] + del kwargs["messages"] + + openai_messages: list[ChatCompletionMessageParam] = [ + message.dict( + include={"role", "content", "tool_calls", "name"}, + exclude_none=True, + ) + for message in model_prompt + ] + + return openai_messages, kwargs + + def _get_embedding_kwargs( + self, + 
model_name: OpenAIModelName, + **kwargs, + ) -> dict: + """Get kwargs for embedding API call. + + Args: + model: The model to use. + kwargs: Keyword arguments to override the default values. + + Returns: + The kwargs for the embedding API call. + + """ + kwargs.update(self._credentials.get_model_access_kwargs(model_name)) + + if extra_headers := self._configuration.extra_request_headers: + kwargs["extra_headers"] = kwargs.get("extra_headers", {}) + kwargs["extra_headers"].update(extra_headers.copy()) + + return kwargs + + async def _create_chat_completion( + self, + messages: list[ChatCompletionMessageParam], + model: OpenAIModelName, + *_, + **kwargs, + ) -> tuple[ChatCompletion, float, int, int]: + """ + Create a chat completion using the OpenAI API with retry handling. + + Params: + openai_messages: List of OpenAI-consumable message dict objects + model: The model to use for the completion + + Returns: + ChatCompletion: The chat completion response object + float: The cost ($) of this completion + int: Number of prompt tokens used + int: Number of completion tokens used + """ + + @self._retry_api_request + async def _create_chat_completion_with_retry( + messages: list[ChatCompletionMessageParam], **kwargs + ) -> ChatCompletion: + return await self._client.chat.completions.create( + messages=messages, # type: ignore + **kwargs, + ) + + completion = await _create_chat_completion_with_retry( + messages, model=model, **kwargs + ) + + if completion.usage: + prompt_tokens_used = completion.usage.prompt_tokens + completion_tokens_used = completion.usage.completion_tokens + else: + prompt_tokens_used = completion_tokens_used = 0 + + cost = self._budget.update_usage_and_cost( + model_info=OPEN_AI_CHAT_MODELS[model], + input_tokens_used=prompt_tokens_used, + output_tokens_used=completion_tokens_used, + ) + self._logger.debug( + f"Completion usage: {prompt_tokens_used} input, " + f"{completion_tokens_used} output - ${round(cost, 5)}" + ) + return completion, cost, 
prompt_tokens_used, completion_tokens_used + + def _parse_assistant_tool_calls( + self, assistant_message: ChatCompletionMessage, compat_mode: bool = False + ): + tool_calls: list[AssistantToolCall] = [] + parse_errors: list[Exception] = [] + + if assistant_message.tool_calls: + for _tc in assistant_message.tool_calls: + try: + parsed_arguments = json_loads(_tc.function.arguments) + except Exception as e: + err_message = ( + f"Decoding arguments for {_tc.function.name} failed: " + + str(e.args[0]) + ) + parse_errors.append( + type(e)(err_message, *e.args[1:]).with_traceback( + e.__traceback__ + ) + ) + continue + + tool_calls.append( + AssistantToolCall( + id=_tc.id, + type=_tc.type, + function=AssistantFunctionCall( + name=_tc.function.name, + arguments=parsed_arguments, + ), + ) + ) + + # If parsing of all tool calls succeeds in the end, we ignore any issues + if len(tool_calls) == len(assistant_message.tool_calls): + parse_errors = [] + + elif compat_mode and assistant_message.content: + try: + tool_calls = list( + _tool_calls_compat_extract_calls(assistant_message.content) + ) + except Exception as e: + parse_errors.append(e) + + return tool_calls, parse_errors + + def _create_embedding( + self, text: str, *_, **kwargs + ) -> Coroutine[None, None, CreateEmbeddingResponse]: + """Create an embedding using the OpenAI API with retry handling.""" + + @self._retry_api_request + async def _create_embedding_with_retry( + text: str, *_, **kwargs + ) -> CreateEmbeddingResponse: + return await self._client.embeddings.create( + input=[text], + **kwargs, + ) + + return _create_embedding_with_retry(text, *_, **kwargs) + + def _retry_api_request(self, func: Callable[_P, _T]) -> Callable[_P, _T]: + _log_retry_debug_message = tenacity.after_log(self._logger, logging.DEBUG) + + def _log_on_fail(retry_state: tenacity.RetryCallState) -> None: + _log_retry_debug_message(retry_state) + + if ( + retry_state.attempt_number == 0 + and retry_state.outcome + and 
isinstance(retry_state.outcome.exception(), RateLimitError) + ): + self._logger.warning( + "Please double check that you have setup a PAID OpenAI API Account." + " You can read more here: " + "https://docs.agpt.co/setup/#getting-an-openai-api-key" + ) + + return tenacity.retry( + retry=( + tenacity.retry_if_exception_type(RateLimitError) + | tenacity.retry_if_exception( + lambda e: isinstance(e, APIStatusError) and e.status_code == 502 + ) + ), + wait=tenacity.wait_exponential(), + stop=tenacity.stop_after_attempt(self._configuration.retries_per_request), + after=_log_on_fail, + )(func) + + def __repr__(self): + return "OpenAIProvider()" + + +def format_function_specs_as_typescript_ns( + functions: list[CompletionModelFunction], +) -> str: + """Returns a function signature block in the format used by OpenAI internally: + https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18 + + For use with `count_tokens` to determine token usage of provided functions. + + Example: + ```ts + namespace functions { + + // Get the current weather in a given location + type get_current_weather = (_: { + // The city and state, e.g. San Francisco, CA + location: string, + unit?: "celsius" | "fahrenheit", + }) => any; + + } // namespace functions + ``` + """ + + return ( + "namespace functions {\n\n" + + "\n\n".join(format_openai_function_for_prompt(f) for f in functions) + + "\n\n} // namespace functions" + ) + + +def format_openai_function_for_prompt(func: CompletionModelFunction) -> str: + """Returns the function formatted similarly to the way OpenAI does it internally: + https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18 + + Example: + ```ts + // Get the current weather in a given location + type get_current_weather = (_: { + // The city and state, e.g. 
San Francisco, CA + location: string, + unit?: "celsius" | "fahrenheit", + }) => any; + ``` + """ + + def param_signature(name: str, spec: JSONSchema) -> str: + return ( + f"// {spec.description}\n" if spec.description else "" + ) + f"{name}{'' if spec.required else '?'}: {spec.typescript_type}," + + return "\n".join( + [ + f"// {func.description}", + f"type {func.name} = (_ :{{", + *[param_signature(name, p) for name, p in func.parameters.items()], + "}) => any;", + ] + ) + + +def count_openai_functions_tokens( + functions: list[CompletionModelFunction], count_tokens: Callable[[str], int] +) -> int: + """Returns the number of tokens taken up by a set of function definitions + + Reference: https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18 # noqa: E501 + """ + return count_tokens( + "# Tools\n\n" + "## functions\n\n" + f"{format_function_specs_as_typescript_ns(functions)}" + ) + + +def _functions_compat_fix_kwargs( + functions: list[CompletionModelFunction], + completion_kwargs: dict, +): + function_definitions = format_function_specs_as_typescript_ns(functions) + function_call_schema = JSONSchema( + type=JSONSchema.Type.OBJECT, + properties={ + "name": JSONSchema( + description="The name of the function to call", + enum=[f.name for f in functions], + required=True, + ), + "arguments": JSONSchema( + description="The arguments for the function call", + type=JSONSchema.Type.OBJECT, + required=True, + ), + }, + ) + tool_calls_schema = JSONSchema( + type=JSONSchema.Type.ARRAY, + items=JSONSchema( + type=JSONSchema.Type.OBJECT, + properties={ + "type": JSONSchema( + type=JSONSchema.Type.STRING, + enum=["function"], + ), + "function": function_call_schema, + }, + ), + ) + completion_kwargs["messages"] = [ + ChatMessage.system( + "# tool usage instructions\n\n" + "Specify a '```tool_calls' block in your response," + " with a valid JSON object that adheres to the following schema:\n\n" + f"{tool_calls_schema.to_dict()}\n\n" + 
"Specify any tools that you need to use through this JSON object.\n\n" + "Put the tool_calls block at the end of your response" + " and include its fences if it is not the only content.\n\n" + "## functions\n\n" + "For the function call itself, use one of the following" + f" functions:\n\n{function_definitions}" + ), + ] + + +def _tool_calls_compat_extract_calls(response: str) -> Iterator[AssistantToolCall]: + import re + import uuid + + logging.debug(f"Trying to extract tool calls from response:\n{response}") + + if response[0] == "[": + tool_calls: list[AssistantToolCallDict] = json_loads(response) + else: + block = re.search(r"```(?:tool_calls)?\n(.*)\n```\s*$", response, re.DOTALL) + if not block: + raise ValueError("Could not find tool_calls block in response") + tool_calls: list[AssistantToolCallDict] = json_loads(block.group(1)) + + for t in tool_calls: + t["id"] = str(uuid.uuid4()) + t["function"]["arguments"] = str(t["function"]["arguments"]) # HACK + + yield AssistantToolCall.parse_obj(t) diff --git a/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py b/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py new file mode 100644 index 000000000000..dd69b526ea92 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py @@ -0,0 +1,359 @@ +import abc +import enum +import math +from collections import defaultdict +from typing import ( + Any, + Callable, + ClassVar, + Generic, + Literal, + Optional, + Protocol, + TypedDict, + TypeVar, +) + +from pydantic import BaseModel, Field, SecretStr, validator + +from autogpt.core.configuration import SystemConfiguration, UserConfigurable +from autogpt.core.resource.schema import ( + Embedding, + ProviderBudget, + ProviderCredentials, + ProviderSettings, + ProviderUsage, + ResourceType, +) +from autogpt.core.utils.json_schema import JSONSchema + + +class ModelProviderService(str, enum.Enum): + """A ModelService describes what kind of service the model provides.""" + + 
EMBEDDING = "embedding" + CHAT = "chat_completion" + TEXT = "text_completion" + + +class ModelProviderName(str, enum.Enum): + OPENAI = "openai" + + +class ChatMessage(BaseModel): + class Role(str, enum.Enum): + USER = "user" + SYSTEM = "system" + ASSISTANT = "assistant" + + TOOL = "tool" + """May be used for the result of tool calls""" + FUNCTION = "function" + """May be used for the return value of function calls""" + + role: Role + content: str + + @staticmethod + def user(content: str) -> "ChatMessage": + return ChatMessage(role=ChatMessage.Role.USER, content=content) + + @staticmethod + def system(content: str) -> "ChatMessage": + return ChatMessage(role=ChatMessage.Role.SYSTEM, content=content) + + +class ChatMessageDict(TypedDict): + role: str + content: str + + +class AssistantFunctionCall(BaseModel): + name: str + arguments: dict[str, Any] + + +class AssistantFunctionCallDict(TypedDict): + name: str + arguments: dict[str, Any] + + +class AssistantToolCall(BaseModel): + id: str + type: Literal["function"] + function: AssistantFunctionCall + + +class AssistantToolCallDict(TypedDict): + id: str + type: Literal["function"] + function: AssistantFunctionCallDict + + +class AssistantChatMessage(ChatMessage): + role: Literal[ChatMessage.Role.ASSISTANT] = ChatMessage.Role.ASSISTANT + content: Optional[str] + tool_calls: Optional[list[AssistantToolCall]] = None + + +class AssistantChatMessageDict(TypedDict, total=False): + role: str + content: str + tool_calls: list[AssistantToolCallDict] + + +class CompletionModelFunction(BaseModel): + """General representation object for LLM-callable functions.""" + + name: str + description: str + parameters: dict[str, "JSONSchema"] + + @property + def schema(self) -> dict[str, str | dict | list]: + """Returns an OpenAI-consumable function specification""" + + return { + "name": self.name, + "description": self.description, + "parameters": { + "type": "object", + "properties": { + name: param.to_dict() for name, param in 
self.parameters.items() + }, + "required": [ + name for name, param in self.parameters.items() if param.required + ], + }, + } + + @staticmethod + def parse(schema: dict) -> "CompletionModelFunction": + return CompletionModelFunction( + name=schema["name"], + description=schema["description"], + parameters=JSONSchema.parse_properties(schema["parameters"]), + ) + + def fmt_line(self) -> str: + params = ", ".join( + f"{name}{'?' if not p.required else ''}: " f"{p.typescript_type}" + for name, p in self.parameters.items() + ) + return f"{self.name}: {self.description}. Params: ({params})" + + +class ModelInfo(BaseModel): + """Struct for model information. + + Would be lovely to eventually get this directly from APIs, but needs to be + scraped from websites for now. + """ + + name: str + service: ModelProviderService + provider_name: ModelProviderName + prompt_token_cost: float = 0.0 + completion_token_cost: float = 0.0 + + +class ModelResponse(BaseModel): + """Standard response struct for a response from a model.""" + + prompt_tokens_used: int + completion_tokens_used: int + model_info: ModelInfo + + +class ModelProviderConfiguration(SystemConfiguration): + retries_per_request: int = UserConfigurable() + extra_request_headers: dict[str, str] = Field(default_factory=dict) + + +class ModelProviderCredentials(ProviderCredentials): + """Credentials for a model provider.""" + + api_key: SecretStr | None = UserConfigurable(default=None) + api_type: SecretStr | None = UserConfigurable(default=None) + api_base: SecretStr | None = UserConfigurable(default=None) + api_version: SecretStr | None = UserConfigurable(default=None) + deployment_id: SecretStr | None = UserConfigurable(default=None) + + class Config: + extra = "ignore" + + +class ModelProviderUsage(ProviderUsage): + """Usage for a particular model from a model provider.""" + + completion_tokens: int = 0 + prompt_tokens: int = 0 + + def update_usage( + self, + input_tokens_used: int, + output_tokens_used: int = 0, + ) 
-> None: + self.prompt_tokens += input_tokens_used + self.completion_tokens += output_tokens_used + + +class ModelProviderBudget(ProviderBudget): + usage: defaultdict[str, ModelProviderUsage] = defaultdict(ModelProviderUsage) + + def update_usage_and_cost( + self, + model_info: ModelInfo, + input_tokens_used: int, + output_tokens_used: int = 0, + ) -> float: + """Update the usage and cost of the provider. + + Returns: + float: The (calculated) cost of the given model response. + """ + self.usage[model_info.name].update_usage(input_tokens_used, output_tokens_used) + incurred_cost = ( + output_tokens_used * model_info.completion_token_cost + + input_tokens_used * model_info.prompt_token_cost + ) + self.total_cost += incurred_cost + self.remaining_budget -= incurred_cost + return incurred_cost + + +class ModelProviderSettings(ProviderSettings): + resource_type: ResourceType = ResourceType.MODEL + configuration: ModelProviderConfiguration + credentials: ModelProviderCredentials + budget: Optional[ModelProviderBudget] = None + + +class ModelProvider(abc.ABC): + """A ModelProvider abstracts the details of a particular provider of models.""" + + default_settings: ClassVar[ModelProviderSettings] + + _configuration: ModelProviderConfiguration + _budget: Optional[ModelProviderBudget] = None + + @abc.abstractmethod + def count_tokens(self, text: str, model_name: str) -> int: + ... + + @abc.abstractmethod + def get_tokenizer(self, model_name: str) -> "ModelTokenizer": + ... + + @abc.abstractmethod + def get_token_limit(self, model_name: str) -> int: + ... + + def get_incurred_cost(self) -> float: + if self._budget: + return self._budget.total_cost + return 0 + + def get_remaining_budget(self) -> float: + if self._budget: + return self._budget.remaining_budget + return math.inf + + +class ModelTokenizer(Protocol): + """A ModelTokenizer provides tokenization specific to a model.""" + + @abc.abstractmethod + def encode(self, text: str) -> list: + ... 
+ + @abc.abstractmethod + def decode(self, tokens: list) -> str: + ... + + +#################### +# Embedding Models # +#################### + + +class EmbeddingModelInfo(ModelInfo): + """Struct for embedding model information.""" + + service: Literal[ModelProviderService.EMBEDDING] = ModelProviderService.EMBEDDING + max_tokens: int + embedding_dimensions: int + + +class EmbeddingModelResponse(ModelResponse): + """Standard response struct for a response from an embedding model.""" + + embedding: Embedding = Field(default_factory=list) + + @classmethod + @validator("completion_tokens_used") + def _verify_no_completion_tokens_used(cls, v): + if v > 0: + raise ValueError("Embeddings should not have completion tokens used.") + return v + + +class EmbeddingModelProvider(ModelProvider): + @abc.abstractmethod + async def create_embedding( + self, + text: str, + model_name: str, + embedding_parser: Callable[[Embedding], Embedding], + **kwargs, + ) -> EmbeddingModelResponse: + ... + + +############### +# Chat Models # +############### + + +class ChatModelInfo(ModelInfo): + """Struct for language model information.""" + + service: Literal[ModelProviderService.CHAT] = ModelProviderService.CHAT + max_tokens: int + has_function_call_api: bool = False + + +_T = TypeVar("_T") + + +class ChatModelResponse(ModelResponse, Generic[_T]): + """Standard response struct for a response from a language model.""" + + response: AssistantChatMessage + parsed_result: _T = None + + +class ChatModelProvider(ModelProvider): + @abc.abstractmethod + async def get_available_models(self) -> list[ChatModelInfo]: + ... + + @abc.abstractmethod + def count_message_tokens( + self, + messages: ChatMessage | list[ChatMessage], + model_name: str, + ) -> int: + ... 
+ + @abc.abstractmethod + async def create_chat_completion( + self, + model_prompt: list[ChatMessage], + model_name: str, + completion_parser: Callable[[AssistantChatMessage], _T] = lambda _: None, + functions: Optional[list[CompletionModelFunction]] = None, + max_output_tokens: Optional[int] = None, + **kwargs, + ) -> ChatModelResponse[_T]: + ... diff --git a/autogpts/autogpt/autogpt/core/resource/schema.py b/autogpts/autogpt/autogpt/core/resource/schema.py new file mode 100644 index 000000000000..0da275ee2704 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/resource/schema.py @@ -0,0 +1,76 @@ +import abc +import enum +import math + +from pydantic import BaseModel, SecretBytes, SecretField, SecretStr + +from autogpt.core.configuration import ( + SystemConfiguration, + SystemSettings, + UserConfigurable, +) + + +class ResourceType(str, enum.Enum): + """An enumeration of resource types.""" + + MODEL = "model" + MEMORY = "memory" + + +class ProviderUsage(SystemConfiguration, abc.ABC): + @abc.abstractmethod + def update_usage(self, *args, **kwargs) -> None: + """Update the usage of the resource.""" + ... + + +class ProviderBudget(SystemConfiguration): + total_budget: float = UserConfigurable(math.inf) + total_cost: float = 0 + remaining_budget: float = math.inf + usage: ProviderUsage + + @abc.abstractmethod + def update_usage_and_cost(self, *args, **kwargs) -> float: + """Update the usage and cost of the provider. + + Returns: + float: The (calculated) cost of the given model response. + """ + ... 
+ + +class ProviderCredentials(SystemConfiguration): + """Struct for credentials.""" + + def unmasked(self) -> dict: + return unmask(self) + + class Config: + json_encoders = { + SecretStr: lambda v: v.get_secret_value() if v else None, + SecretBytes: lambda v: v.get_secret_value() if v else None, + SecretField: lambda v: v.get_secret_value() if v else None, + } + + +def unmask(model: BaseModel): + unmasked_fields = {} + for field_name, _ in model.__fields__.items(): + value = getattr(model, field_name) + if isinstance(value, SecretStr): + unmasked_fields[field_name] = value.get_secret_value() + else: + unmasked_fields[field_name] = value + return unmasked_fields + + +class ProviderSettings(SystemSettings): + resource_type: ResourceType + credentials: ProviderCredentials | None = None + budget: ProviderBudget | None = None + + +# Used both by model providers and memory providers +Embedding = list[float] diff --git a/autogpts/autogpt/autogpt/core/runner/__init__.py b/autogpts/autogpt/autogpt/core/runner/__init__.py new file mode 100644 index 000000000000..25c7b6508806 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/runner/__init__.py @@ -0,0 +1,3 @@ +""" +This module contains the runner for the v2 agent server and client. 
+""" diff --git a/autogpts/autogpt/autogpt/core/runner/cli_app/__init__.py b/autogpts/autogpt/autogpt/core/runner/cli_app/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/autogpt/core/runner/cli_app/cli.py b/autogpts/autogpt/autogpt/core/runner/cli_app/cli.py new file mode 100644 index 000000000000..d54acf53b27e --- /dev/null +++ b/autogpts/autogpt/autogpt/core/runner/cli_app/cli.py @@ -0,0 +1,47 @@ +from pathlib import Path + +import click +import yaml + +from autogpt.core.runner.cli_app.main import run_auto_gpt +from autogpt.core.runner.client_lib.shared_click_commands import ( + DEFAULT_SETTINGS_FILE, + make_settings, +) +from autogpt.core.runner.client_lib.utils import coroutine, handle_exceptions + + +@click.group() +def autogpt(): + """Temporary command group for v2 commands.""" + pass + + +autogpt.add_command(make_settings) + + +@autogpt.command() +@click.option( + "--settings-file", + type=click.Path(), + default=DEFAULT_SETTINGS_FILE, +) +@click.option( + "--pdb", + is_flag=True, + help="Drop into a debugger if an error is raised.", +) +@coroutine +async def run(settings_file: str, pdb: bool) -> None: + """Run the AutoGPT agent.""" + click.echo("Running AutoGPT agent...") + settings_file: Path = Path(settings_file) + settings = {} + if settings_file.exists(): + settings = yaml.safe_load(settings_file.read_text()) + main = handle_exceptions(run_auto_gpt, with_debugger=pdb) + await main(settings) + + +if __name__ == "__main__": + autogpt() diff --git a/autogpts/autogpt/autogpt/core/runner/cli_app/main.py b/autogpts/autogpt/autogpt/core/runner/cli_app/main.py new file mode 100644 index 000000000000..d6bb5c4f0206 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/runner/cli_app/main.py @@ -0,0 +1,74 @@ +import click + +from autogpt.core.agent import AgentSettings, SimpleAgent +from autogpt.core.runner.client_lib.logging import ( + configure_root_logger, + get_client_logger, +) +from 
autogpt.core.runner.client_lib.parser import ( + parse_ability_result, + parse_agent_name_and_goals, + parse_agent_plan, + parse_next_ability, +) + + +async def run_auto_gpt(user_configuration: dict): + """Run the AutoGPT CLI client.""" + + configure_root_logger() + + client_logger = get_client_logger() + client_logger.debug("Getting agent settings") + + agent_workspace = ( + user_configuration.get("workspace", {}).get("configuration", {}).get("root", "") + ) + + if not agent_workspace: # We don't have an agent yet. + ################# + # Bootstrapping # + ################# + # Step 1. Collate the user's settings with the default system settings. + agent_settings: AgentSettings = SimpleAgent.compile_settings( + client_logger, + user_configuration, + ) + + # Step 2. Get a name and goals for the agent. + # First we need to figure out what the user wants to do with the agent. + # We'll do this by asking the user for a prompt. + user_objective = click.prompt("What do you want AutoGPT to do?") + # Ask a language model to determine a name and goals for a suitable agent. + name_and_goals = await SimpleAgent.determine_agent_name_and_goals( + user_objective, + agent_settings, + client_logger, + ) + print("\n" + parse_agent_name_and_goals(name_and_goals)) + # Finally, update the agent settings with the name and goals. + agent_settings.update_agent_name_and_goals(name_and_goals) + + # Step 3. Provision the agent. 
+ agent_workspace = SimpleAgent.provision_agent(agent_settings, client_logger) + client_logger.info("Agent is provisioned") + + # launch agent interaction loop + agent = SimpleAgent.from_workspace( + agent_workspace, + client_logger, + ) + client_logger.info("Agent is loaded") + + plan = await agent.build_initial_plan() + print(parse_agent_plan(plan)) + + while True: + current_task, next_ability = await agent.determine_next_ability(plan) + print(parse_next_ability(current_task, next_ability)) + user_input = click.prompt( + "Should the agent proceed with this ability?", + default="y", + ) + ability_result = await agent.execute_next_ability(user_input) + print(parse_ability_result(ability_result)) diff --git a/autogpts/autogpt/autogpt/core/runner/cli_web_app/__init__.py b/autogpts/autogpt/autogpt/core/runner/cli_web_app/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/autogpt/core/runner/cli_web_app/cli.py b/autogpts/autogpt/autogpt/core/runner/cli_web_app/cli.py new file mode 100644 index 000000000000..e00bb33b734b --- /dev/null +++ b/autogpts/autogpt/autogpt/core/runner/cli_web_app/cli.py @@ -0,0 +1,58 @@ +import pathlib + +import click +import yaml +from agent_protocol import Agent as AgentProtocol + +from autogpt.core.runner.cli_web_app.server.api import task_handler +from autogpt.core.runner.client_lib.shared_click_commands import ( + DEFAULT_SETTINGS_FILE, + make_settings, +) +from autogpt.core.runner.client_lib.utils import coroutine + + +@click.group() +def autogpt(): + """Temporary command group for v2 commands.""" + pass + + +autogpt.add_command(make_settings) + + +@autogpt.command() +@click.option( + "port", + "--port", + default=8080, + help="The port of the webserver.", + type=click.INT, +) +def server(port: int) -> None: + """Run the AutoGPT runner httpserver.""" + click.echo("Running AutoGPT runner httpserver...") + AgentProtocol.handle_task(task_handler).start(port) + + +@autogpt.command() +@click.option( 
+ "--settings-file", + type=click.Path(), + default=DEFAULT_SETTINGS_FILE, +) +@coroutine +async def client(settings_file) -> None: + """Run the AutoGPT runner client.""" + settings_file = pathlib.Path(settings_file) + settings = {} + if settings_file.exists(): + settings = yaml.safe_load(settings_file.read_text()) + + settings + # TODO: Call the API server with the settings and task, + # using the Python API client for agent protocol. + + +if __name__ == "__main__": + autogpt() diff --git a/autogpts/autogpt/autogpt/core/runner/cli_web_app/server/__init__.py b/autogpts/autogpt/autogpt/core/runner/cli_web_app/server/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/autogpt/core/runner/cli_web_app/server/api.py b/autogpts/autogpt/autogpt/core/runner/cli_web_app/server/api.py new file mode 100644 index 000000000000..eadb20e58a82 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/runner/cli_web_app/server/api.py @@ -0,0 +1,99 @@ +import logging + +from agent_protocol import StepHandler, StepResult + +from autogpt.agents import Agent +from autogpt.app.main import UserFeedback +from autogpt.commands import COMMAND_CATEGORIES +from autogpt.config import AIProfile, ConfigBuilder +from autogpt.logs.helpers import user_friendly_output +from autogpt.models.command_registry import CommandRegistry +from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT + + +async def task_handler(task_input) -> StepHandler: + task = task_input.__root__ if task_input else {} + agent = bootstrap_agent(task.get("user_input"), False) + + next_command_name: str | None = None + next_command_args: dict[str, str] | None = None + + async def step_handler(step_input) -> StepResult: + step = step_input.__root__ if step_input else {} + + nonlocal next_command_name, next_command_args + + result = await interaction_step( + agent, + step.get("user_input"), + step.get("user_feedback"), + next_command_name, + next_command_args, + ) + + next_command_name = 
result["next_step_command_name"] if result else None + next_command_args = result["next_step_command_args"] if result else None + + if not result: + return StepResult(output=None, is_last=True) + return StepResult(output=result) + + return step_handler + + +async def interaction_step( + agent: Agent, + user_input, + user_feedback: UserFeedback | None, + command_name: str | None, + command_args: dict[str, str] | None, +): + """Run one step of the interaction loop.""" + if user_feedback == UserFeedback.EXIT: + return + if user_feedback == UserFeedback.TEXT: + command_name = "human_feedback" + + result: str | None = None + + if command_name is not None: + result = agent.execute(command_name, command_args, user_input) + if result is None: + user_friendly_output( + title="SYSTEM:", message="Unable to execute command", level=logging.WARN + ) + return + + next_command_name, next_command_args, assistant_reply_dict = agent.propose_action() + + return { + "config": agent.config, + "ai_profile": agent.ai_profile, + "result": result, + "assistant_reply_dict": assistant_reply_dict, + "next_step_command_name": next_command_name, + "next_step_command_args": next_command_args, + } + + +def bootstrap_agent(task, continuous_mode) -> Agent: + config = ConfigBuilder.build_config_from_env() + config.logging.level = logging.DEBUG + config.logging.plain_console_output = True + config.continuous_mode = continuous_mode + config.temperature = 0 + command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config) + config.memory_backend = "no_memory" + ai_profile = AIProfile( + ai_name="AutoGPT", + ai_role="a multi-purpose AI assistant.", + ai_goals=[task], + ) + # FIXME this won't work - ai_profile and triggering_prompt is not a valid argument, + # lacks file_storage, settings and llm_provider + return Agent( + command_registry=command_registry, + ai_profile=ai_profile, + legacy_config=config, + triggering_prompt=DEFAULT_TRIGGERING_PROMPT, + ) diff --git 
a/autogpts/autogpt/autogpt/core/runner/client_lib/__init__.py b/autogpts/autogpt/autogpt/core/runner/client_lib/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/autogpt/core/runner/client_lib/logging/__init__.py b/autogpts/autogpt/autogpt/core/runner/client_lib/logging/__init__.py new file mode 100644 index 000000000000..6d263b6ad1a6 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/runner/client_lib/logging/__init__.py @@ -0,0 +1,22 @@ +import logging + +from .config import BelowLevelFilter, FancyConsoleFormatter, configure_root_logger +from .helpers import dump_prompt + + +def get_client_logger(): + # Configure logging before we do anything else. + # Application logs need a place to live. + client_logger = logging.getLogger("autogpt_client_application") + client_logger.setLevel(logging.DEBUG) + + return client_logger + + +__all__ = [ + "configure_root_logger", + "get_client_logger", + "FancyConsoleFormatter", + "BelowLevelFilter", + "dump_prompt", +] diff --git a/autogpts/autogpt/autogpt/core/runner/client_lib/logging/config.py b/autogpts/autogpt/autogpt/core/runner/client_lib/logging/config.py new file mode 100644 index 000000000000..56f79f5fe0d0 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/runner/client_lib/logging/config.py @@ -0,0 +1,82 @@ +import logging +import sys + +from colorama import Fore, Style +from openai._base_client import log as openai_logger + +SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(message)s" +DEBUG_LOG_FORMAT = ( + "%(asctime)s.%(msecs)03d %(levelname)s %(filename)s:%(lineno)d %(message)s" +) + + +def configure_root_logger(): + console_formatter = FancyConsoleFormatter(SIMPLE_LOG_FORMAT) + + stdout = logging.StreamHandler(stream=sys.stdout) + stdout.setLevel(logging.DEBUG) + stdout.addFilter(BelowLevelFilter(logging.WARNING)) + stdout.setFormatter(console_formatter) + stderr = logging.StreamHandler() + stderr.setLevel(logging.WARNING) + stderr.setFormatter(console_formatter) + + 
logging.basicConfig(level=logging.DEBUG, handlers=[stdout, stderr]) + + # Disable debug logging from OpenAI library + openai_logger.setLevel(logging.WARNING) + + +class FancyConsoleFormatter(logging.Formatter): + """ + A custom logging formatter designed for console output. + + This formatter enhances the standard logging output with color coding. The color + coding is based on the level of the log message, making it easier to distinguish + between different types of messages in the console output. + + The color for each level is defined in the LEVEL_COLOR_MAP class attribute. + """ + + # level -> (level & text color, title color) + LEVEL_COLOR_MAP = { + logging.DEBUG: Fore.LIGHTBLACK_EX, + logging.INFO: Fore.BLUE, + logging.WARNING: Fore.YELLOW, + logging.ERROR: Fore.RED, + logging.CRITICAL: Fore.RED + Style.BRIGHT, + } + + def format(self, record: logging.LogRecord) -> str: + # Make sure `msg` is a string + if not hasattr(record, "msg"): + record.msg = "" + elif not type(record.msg) is str: + record.msg = str(record.msg) + + # Determine default color based on error level + level_color = "" + if record.levelno in self.LEVEL_COLOR_MAP: + level_color = self.LEVEL_COLOR_MAP[record.levelno] + record.levelname = f"{level_color}{record.levelname}{Style.RESET_ALL}" + + # Determine color for message + color = getattr(record, "color", level_color) + color_is_specified = hasattr(record, "color") + + # Don't color INFO messages unless the color is explicitly specified. 
+ if color and (record.levelno != logging.INFO or color_is_specified): + record.msg = f"{color}{record.msg}{Style.RESET_ALL}" + + return super().format(record) + + +class BelowLevelFilter(logging.Filter): + """Filter for logging levels below a certain threshold.""" + + def __init__(self, below_level: int): + super().__init__() + self.below_level = below_level + + def filter(self, record: logging.LogRecord): + return record.levelno < self.below_level diff --git a/autogpts/autogpt/autogpt/core/runner/client_lib/logging/helpers.py b/autogpts/autogpt/autogpt/core/runner/client_lib/logging/helpers.py new file mode 100644 index 000000000000..d341f16ca239 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/runner/client_lib/logging/helpers.py @@ -0,0 +1,23 @@ +from math import ceil, floor +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from autogpt.core.prompting import ChatPrompt + +SEPARATOR_LENGTH = 42 + + +def dump_prompt(prompt: "ChatPrompt") -> str: + def separator(text: str): + half_sep_len = (SEPARATOR_LENGTH - 2 - len(text)) / 2 + return f"{floor(half_sep_len)*'-'} {text.upper()} {ceil(half_sep_len)*'-'}" + + formatted_messages = "\n".join( + [f"{separator(m.role)}\n{m.content}" for m in prompt.messages] + ) + return f""" +============== {prompt.__class__.__name__} ============== +Length: {len(prompt.messages)} messages +{formatted_messages} +========================================== +""" diff --git a/autogpts/autogpt/autogpt/core/runner/client_lib/parser.py b/autogpts/autogpt/autogpt/core/runner/client_lib/parser.py new file mode 100644 index 000000000000..54af17403ec6 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/runner/client_lib/parser.py @@ -0,0 +1,45 @@ +def parse_agent_name_and_goals(name_and_goals: dict) -> str: + parsed_response = f"Agent Name: {name_and_goals['agent_name']}\n" + parsed_response += f"Agent Role: {name_and_goals['agent_role']}\n" + parsed_response += "Agent Goals:\n" + for i, goal in enumerate(name_and_goals["agent_goals"]): + 
parsed_response += f"{i+1}. {goal}\n" + return parsed_response + + +def parse_agent_plan(plan: dict) -> str: + parsed_response = "Agent Plan:\n" + for i, task in enumerate(plan["task_list"]): + parsed_response += f"{i+1}. {task['objective']}\n" + parsed_response += f"Task type: {task['type']} " + parsed_response += f"Priority: {task['priority']}\n" + parsed_response += "Ready Criteria:\n" + for j, criteria in enumerate(task["ready_criteria"]): + parsed_response += f" {j+1}. {criteria}\n" + parsed_response += "Acceptance Criteria:\n" + for j, criteria in enumerate(task["acceptance_criteria"]): + parsed_response += f" {j+1}. {criteria}\n" + parsed_response += "\n" + + return parsed_response + + +def parse_next_ability(current_task, next_ability: dict) -> str: + parsed_response = f"Current Task: {current_task.objective}\n" + ability_args = ", ".join( + f"{k}={v}" for k, v in next_ability["ability_arguments"].items() + ) + parsed_response += f"Next Ability: {next_ability['next_ability']}({ability_args})\n" + parsed_response += f"Motivation: {next_ability['motivation']}\n" + parsed_response += f"Self-criticism: {next_ability['self_criticism']}\n" + parsed_response += f"Reasoning: {next_ability['reasoning']}\n" + return parsed_response + + +def parse_ability_result(ability_result) -> str: + parsed_response = f"Ability: {ability_result['ability_name']}\n" + parsed_response += f"Ability Arguments: {ability_result['ability_args']}\n" + parsed_response += f"Ability Result: {ability_result['success']}\n" + parsed_response += f"Message: {ability_result['message']}\n" + parsed_response += f"Data: {ability_result['new_knowledge']}\n" + return parsed_response diff --git a/autogpts/autogpt/autogpt/core/runner/client_lib/settings.py b/autogpts/autogpt/autogpt/core/runner/client_lib/settings.py new file mode 100644 index 000000000000..9c9983024050 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/runner/client_lib/settings.py @@ -0,0 +1,14 @@ +from pathlib import Path + +import yaml 
+ +from autogpt.core.agent import SimpleAgent + + +def make_user_configuration(settings_file_path: Path): + user_configuration = SimpleAgent.build_user_configuration() + + settings_file_path.parent.mkdir(parents=True, exist_ok=True) + print("Writing settings to", settings_file_path) + with settings_file_path.open("w") as f: + yaml.safe_dump(user_configuration, f) diff --git a/autogpts/autogpt/autogpt/core/runner/client_lib/shared_click_commands.py b/autogpts/autogpt/autogpt/core/runner/client_lib/shared_click_commands.py new file mode 100644 index 000000000000..5be52acb8362 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/runner/client_lib/shared_click_commands.py @@ -0,0 +1,19 @@ +import pathlib + +import click + +DEFAULT_SETTINGS_FILE = str( + pathlib.Path("~/auto-gpt/default_agent_settings.yml").expanduser() +) + + +@click.command() +@click.option( + "--settings-file", + type=click.Path(), + default=DEFAULT_SETTINGS_FILE, +) +def make_settings(settings_file: str) -> None: + from autogpt.core.runner.client_lib.settings import make_user_configuration + + make_user_configuration(pathlib.Path(settings_file)) diff --git a/autogpts/autogpt/autogpt/core/runner/client_lib/utils.py b/autogpts/autogpt/autogpt/core/runner/client_lib/utils.py new file mode 100644 index 000000000000..887683df7ca1 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/runner/client_lib/utils.py @@ -0,0 +1,62 @@ +import asyncio +import functools +from bdb import BdbQuit +from typing import Any, Callable, Coroutine, ParamSpec, TypeVar + +import click + +P = ParamSpec("P") +T = TypeVar("T") + + +def handle_exceptions( + application_main: Callable[P, T], + with_debugger: bool, +) -> Callable[P, T]: + """Wraps a function so that it drops a user into a debugger if it raises an error. + + This is intended to be used as a wrapper for the main function of a CLI application. + It will catch all errors and drop a user into a debugger if the error is not a + `KeyboardInterrupt`. 
If the error is a `KeyboardInterrupt`, it will raise the error. + If the error is not a `KeyboardInterrupt`, it will log the error and drop a user + into a debugger if `with_debugger` is `True`. + If `with_debugger` is `False`, it will raise the error. + + Parameters + ---------- + application_main + The function to wrap. + with_debugger + Whether to drop a user into a debugger if an error is raised. + + Returns + ------- + Callable + The wrapped function. + + """ + + @functools.wraps(application_main) + async def wrapped(*args: P.args, **kwargs: P.kwargs) -> T: + try: + return await application_main(*args, **kwargs) + except (BdbQuit, KeyboardInterrupt, click.Abort): + raise + except Exception as e: + if with_debugger: + print(f"Uncaught exception {e}") + import pdb + + pdb.post_mortem() + else: + raise + + return wrapped + + +def coroutine(f: Callable[P, Coroutine[Any, Any, T]]) -> Callable[P, T]: + @functools.wraps(f) + def wrapper(*args: P.args, **kwargs: P.kwargs): + return asyncio.run(f(*args, **kwargs)) + + return wrapper diff --git a/autogpts/autogpt/autogpt/core/utils/json_schema.py b/autogpts/autogpt/autogpt/core/utils/json_schema.py new file mode 100644 index 000000000000..d72b509dd0ce --- /dev/null +++ b/autogpts/autogpt/autogpt/core/utils/json_schema.py @@ -0,0 +1,142 @@ +import enum +from textwrap import indent +from typing import Optional + +from jsonschema import Draft7Validator, ValidationError +from pydantic import BaseModel + + +class JSONSchema(BaseModel): + class Type(str, enum.Enum): + STRING = "string" + ARRAY = "array" + OBJECT = "object" + NUMBER = "number" + INTEGER = "integer" + BOOLEAN = "boolean" + + # TODO: add docstrings + description: Optional[str] = None + type: Optional[Type] = None + enum: Optional[list] = None + required: bool = False + items: Optional["JSONSchema"] = None + properties: Optional[dict[str, "JSONSchema"]] = None + minimum: Optional[int | float] = None + maximum: Optional[int | float] = None + minItems: 
Optional[int] = None + maxItems: Optional[int] = None + + def to_dict(self) -> dict: + schema: dict = { + "type": self.type.value if self.type else None, + "description": self.description, + } + if self.type == "array": + if self.items: + schema["items"] = self.items.to_dict() + schema["minItems"] = self.minItems + schema["maxItems"] = self.maxItems + elif self.type == "object": + if self.properties: + schema["properties"] = { + name: prop.to_dict() for name, prop in self.properties.items() + } + schema["required"] = [ + name for name, prop in self.properties.items() if prop.required + ] + elif self.enum: + schema["enum"] = self.enum + else: + schema["minumum"] = self.minimum + schema["maximum"] = self.maximum + + schema = {k: v for k, v in schema.items() if v is not None} + + return schema + + @staticmethod + def from_dict(schema: dict) -> "JSONSchema": + return JSONSchema( + description=schema.get("description"), + type=schema["type"], + enum=schema["enum"] if "enum" in schema else None, + items=JSONSchema.from_dict(schema["items"]) if "items" in schema else None, + properties=JSONSchema.parse_properties(schema) + if schema["type"] == "object" + else None, + minimum=schema.get("minimum"), + maximum=schema.get("maximum"), + minItems=schema.get("minItems"), + maxItems=schema.get("maxItems"), + ) + + @staticmethod + def parse_properties(schema_node: dict) -> dict[str, "JSONSchema"]: + properties = ( + {k: JSONSchema.from_dict(v) for k, v in schema_node["properties"].items()} + if "properties" in schema_node + else {} + ) + if "required" in schema_node: + for k, v in properties.items(): + v.required = k in schema_node["required"] + return properties + + def validate_object(self, object: object) -> tuple[bool, list[ValidationError]]: + """ + Validates an object or a value against the JSONSchema. + + Params: + object: The value/object to validate. + schema (JSONSchema): The JSONSchema to validate against. 
+ + Returns: + bool: Indicates whether the given value or object is valid for the schema. + list[ValidationError]: The issues with the value or object (if any). + """ + validator = Draft7Validator(self.to_dict()) + + if errors := sorted(validator.iter_errors(object), key=lambda e: e.path): + return False, errors + + return True, [] + + def to_typescript_object_interface(self, interface_name: str = "") -> str: + if self.type != JSONSchema.Type.OBJECT: + raise NotImplementedError("Only `object` schemas are supported") + + if self.properties: + attributes: list[str] = [] + for name, property in self.properties.items(): + if property.description: + attributes.append(f"// {property.description}") + attributes.append(f"{name}: {property.typescript_type};") + attributes_string = "\n".join(attributes) + else: + attributes_string = "[key: string]: any" + + return ( + f"interface {interface_name} " if interface_name else "" + ) + f"{{\n{indent(attributes_string, ' ')}\n}}" + + @property + def typescript_type(self) -> str: + if self.type == JSONSchema.Type.BOOLEAN: + return "boolean" + elif self.type in {JSONSchema.Type.INTEGER, JSONSchema.Type.NUMBER}: + return "number" + elif self.type == JSONSchema.Type.STRING: + return "string" + elif self.type == JSONSchema.Type.ARRAY: + return f"Array<{self.items.typescript_type}>" if self.items else "Array" + elif self.type == JSONSchema.Type.OBJECT: + if not self.properties: + return "Record" + return self.to_typescript_object_interface() + elif self.enum: + return " | ".join(repr(v) for v in self.enum) + else: + raise NotImplementedError( + f"JSONSchema.typescript_type does not support Type.{self.type.name} yet" + ) diff --git a/autogpts/autogpt/autogpt/core/utils/json_utils.py b/autogpts/autogpt/autogpt/core/utils/json_utils.py new file mode 100644 index 000000000000..45650427dd4d --- /dev/null +++ b/autogpts/autogpt/autogpt/core/utils/json_utils.py @@ -0,0 +1,93 @@ +import logging +import re +from typing import Any + +import 
demjson3 + +logger = logging.getLogger(__name__) + + +def json_loads(json_str: str) -> Any: + """Parse a JSON string, tolerating minor syntax issues: + - Missing, extra and trailing commas + - Extraneous newlines and whitespace outside of string literals + - Inconsistent spacing after colons and commas + - Missing closing brackets or braces + - Numbers: binary, hex, octal, trailing and prefixed decimal points + - Different encodings + - Surrounding markdown code block + - Comments + + Args: + json_str: The JSON string to parse. + + Returns: + The parsed JSON object, same as built-in json.loads. + """ + # Remove possible code block + pattern = r"```(?:json|JSON)*([\s\S]*?)```" + match = re.search(pattern, json_str) + + if match: + json_str = match.group(1).strip() + + json_result = demjson3.decode(json_str, return_errors=True) + assert json_result is not None # by virtue of return_errors=True + + if json_result.errors: + logger.debug( + "JSON parse errors:\n" + "\n".join(str(e) for e in json_result.errors) + ) + + if json_result.object in (demjson3.syntax_error, demjson3.undefined): + raise ValueError( + f"Failed to parse JSON string: {json_str}", *json_result.errors + ) + + return json_result.object + + +def extract_dict_from_json(json_str: str) -> dict[str, Any]: + # Sometimes the response includes the JSON in a code block with ``` + pattern = r"```(?:json|JSON)*([\s\S]*?)```" + match = re.search(pattern, json_str) + + if match: + json_str = match.group(1).strip() + else: + # The string may contain JSON. 
+ json_pattern = r"{[\s\S]*}" + match = re.search(json_pattern, json_str) + + if match: + json_str = match.group() + + result = json_loads(json_str) + if not isinstance(result, dict): + raise ValueError( + f"Response '''{json_str}''' evaluated to non-dict value {repr(result)}" + ) + return result + + +def extract_list_from_json(json_str: str) -> list[Any]: + # Sometimes the response includes the JSON in a code block with ``` + pattern = r"```(?:json|JSON)*([\s\S]*?)```" + match = re.search(pattern, json_str) + + if match: + json_str = match.group(1).strip() + else: + # The string may contain JSON. + json_pattern = r"\[[\s\S]*\]" + match = re.search(json_pattern, json_str) + + if match: + json_str = match.group() + + result = json_loads(json_str) + if not isinstance(result, list): + raise ValueError( + f"Response '''{json_str}''' evaluated to non-list value {repr(result)}" + ) + return result diff --git a/autogpts/autogpt/autogpt/core/workspace/__init__.py b/autogpts/autogpt/autogpt/core/workspace/__init__.py new file mode 100644 index 000000000000..ae1877dee0d2 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/workspace/__init__.py @@ -0,0 +1,9 @@ +"""The workspace is the central hub for the Agent's on disk resources.""" +from autogpt.core.workspace.base import Workspace +from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings + +__all__ = [ + "SimpleWorkspace", + "Workspace", + "WorkspaceSettings", +] diff --git a/autogpts/autogpt/autogpt/core/workspace/base.py b/autogpts/autogpt/autogpt/core/workspace/base.py new file mode 100644 index 000000000000..b011056c3f97 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/workspace/base.py @@ -0,0 +1,70 @@ +from __future__ import annotations + +import abc +import logging +import typing +from pathlib import Path + +if typing.TYPE_CHECKING: + from autogpt.core.configuration import AgentConfiguration + + +class Workspace(abc.ABC): + """The workspace is the root directory for all generated files. 
+ + The workspace is responsible for creating the root directory and + providing a method for getting the full path to an item in the + workspace. + + """ + + @property + @abc.abstractmethod + def root(self) -> Path: + """The root directory of the workspace.""" + ... + + @property + @abc.abstractmethod + def restrict_to_workspace(self) -> bool: + """Whether to restrict generated paths to the workspace.""" + ... + + @staticmethod + @abc.abstractmethod + def setup_workspace( + configuration: AgentConfiguration, logger: logging.Logger + ) -> Path: + """Create the workspace root directory and set up all initial content. + + Parameters + ---------- + configuration + The Agent's configuration. + logger + The Agent's logger. + + Returns + ------- + Path + The path to the workspace root directory. + + """ + ... + + @abc.abstractmethod + def get_path(self, relative_path: str | Path) -> Path: + """Get the full path for an item in the workspace. + + Parameters + ---------- + relative_path + The path to the item relative to the workspace root. + + Returns + ------- + Path + The full path to the item. + + """ + ... 
diff --git a/autogpts/autogpt/autogpt/core/workspace/simple.py b/autogpts/autogpt/autogpt/core/workspace/simple.py new file mode 100644 index 000000000000..1c7a3f903370 --- /dev/null +++ b/autogpts/autogpt/autogpt/core/workspace/simple.py @@ -0,0 +1,194 @@ +import json +import logging +import typing +from pathlib import Path + +from pydantic import SecretField + +from autogpt.core.configuration import ( + Configurable, + SystemConfiguration, + SystemSettings, + UserConfigurable, +) +from autogpt.core.workspace.base import Workspace + +if typing.TYPE_CHECKING: + # Cyclic import + from autogpt.core.agent.simple import AgentSettings + + +class WorkspaceConfiguration(SystemConfiguration): + root: str + parent: str = UserConfigurable() + restrict_to_workspace: bool = UserConfigurable() + + +class WorkspaceSettings(SystemSettings): + configuration: WorkspaceConfiguration + + +class SimpleWorkspace(Configurable, Workspace): + default_settings = WorkspaceSettings( + name="workspace", + description="The workspace is the root directory for all agent activity.", + configuration=WorkspaceConfiguration( + root="", + parent="~/auto-gpt/agents", + restrict_to_workspace=True, + ), + ) + + NULL_BYTES = ["\0", "\000", "\x00", "\u0000", "%00"] + + def __init__( + self, + settings: WorkspaceSettings, + logger: logging.Logger, + ): + self._configuration = settings.configuration + self._logger = logger.getChild("workspace") + + @property + def root(self) -> Path: + return Path(self._configuration.root) + + @property + def debug_log_path(self) -> Path: + return self.root / "logs" / "debug.log" + + @property + def cycle_log_path(self) -> Path: + return self.root / "logs" / "cycle.log" + + @property + def configuration_path(self) -> Path: + return self.root / "configuration.yml" + + @property + def restrict_to_workspace(self) -> bool: + return self._configuration.restrict_to_workspace + + def get_path(self, relative_path: str | Path) -> Path: + """Get the full path for an item in the 
workspace. + + Parameters + ---------- + relative_path + The relative path to resolve in the workspace. + + Returns + ------- + Path + The resolved path relative to the workspace. + + """ + return self._sanitize_path( + relative_path, + root=self.root, + restrict_to_root=self.restrict_to_workspace, + ) + + def _sanitize_path( + self, + relative_path: str | Path, + root: str | Path = None, + restrict_to_root: bool = True, + ) -> Path: + """Resolve the relative path within the given root if possible. + + Parameters + ---------- + relative_path + The relative path to resolve. + root + The root path to resolve the relative path within. + restrict_to_root + Whether to restrict the path to the root. + + Returns + ------- + Path + The resolved path. + + Raises + ------ + ValueError + If the path is absolute and a root is provided. + ValueError + If the path is outside the root and the root is restricted. + + """ + + # Posix systems disallow null bytes in paths. Windows is agnostic about it. + # Do an explicit check here for all sorts of null byte representations. + + for null_byte in self.NULL_BYTES: + if null_byte in str(relative_path) or null_byte in str(root): + raise ValueError("embedded null byte") + + if root is None: + return Path(relative_path).resolve() + + self._logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'") + root, relative_path = Path(root).resolve(), Path(relative_path) + self._logger.debug(f"Resolved root as '{root}'") + + if relative_path.is_absolute(): + raise ValueError( + f"Attempted to access absolute path '{relative_path}' " + f"in workspace '{root}'." + ) + full_path = root.joinpath(relative_path).resolve() + + self._logger.debug(f"Joined paths as '{full_path}'") + + if restrict_to_root and not full_path.is_relative_to(root): + raise ValueError( + f"Attempted to access path '{full_path}' outside of workspace '{root}'." 
+ ) + + return full_path + + ################################### + # Factory methods for agent setup # + ################################### + + @staticmethod + def setup_workspace(settings: "AgentSettings", logger: logging.Logger) -> Path: + workspace_parent = settings.workspace.configuration.parent + workspace_parent = Path(workspace_parent).expanduser().resolve() + workspace_parent.mkdir(parents=True, exist_ok=True) + + agent_name = settings.agent.name + + workspace_root = workspace_parent / agent_name + workspace_root.mkdir(parents=True, exist_ok=True) + + settings.workspace.configuration.root = str(workspace_root) + + with (workspace_root / "agent_settings.json").open("w") as f: + settings_json = settings.json( + encoder=lambda x: x.get_secret_value() + if isinstance(x, SecretField) + else x, + ) + f.write(settings_json) + + # TODO: What are all the kinds of logs we want here? + log_path = workspace_root / "logs" + log_path.mkdir(parents=True, exist_ok=True) + (log_path / "debug.log").touch() + (log_path / "cycle.log").touch() + + return workspace_root + + @staticmethod + def load_agent_settings(workspace_root: Path) -> "AgentSettings": + # Cyclic import + from autogpt.core.agent.simple import AgentSettings + + with (workspace_root / "agent_settings.json").open("r") as f: + agent_settings = json.load(f) + + return AgentSettings.parse_obj(agent_settings) diff --git a/autogpts/autogpt/autogpt/file_storage/__init__.py b/autogpts/autogpt/autogpt/file_storage/__init__.py new file mode 100644 index 000000000000..8e4116f39136 --- /dev/null +++ b/autogpts/autogpt/autogpt/file_storage/__init__.py @@ -0,0 +1,44 @@ +import enum +from pathlib import Path + +from .base import FileStorage + + +class FileStorageBackendName(str, enum.Enum): + LOCAL = "local" + GCS = "gcs" + S3 = "s3" + + +def get_storage( + backend: FileStorageBackendName, + root_path: Path = ".", + restrict_to_root: bool = True, +) -> FileStorage: + match backend: + case FileStorageBackendName.LOCAL: + from 
.local import FileStorageConfiguration, LocalFileStorage + + config = FileStorageConfiguration.from_env() + config.root = root_path + config.restrict_to_root = restrict_to_root + return LocalFileStorage(config) + case FileStorageBackendName.S3: + from .s3 import S3FileStorage, S3FileStorageConfiguration + + config = S3FileStorageConfiguration.from_env() + config.root = root_path + return S3FileStorage(config) + case FileStorageBackendName.GCS: + from .gcs import GCSFileStorage, GCSFileStorageConfiguration + + config = GCSFileStorageConfiguration.from_env() + config.root = root_path + return GCSFileStorage(config) + + +__all__ = [ + "FileStorage", + "FileStorageBackendName", + "get_storage", +] diff --git a/autogpts/autogpt/autogpt/file_storage/base.py b/autogpts/autogpt/autogpt/file_storage/base.py new file mode 100644 index 000000000000..62521bb4ae68 --- /dev/null +++ b/autogpts/autogpt/autogpt/file_storage/base.py @@ -0,0 +1,204 @@ +""" +The FileStorage class provides an interface for interacting with a file storage. +""" + +from __future__ import annotations + +import logging +import os +from abc import ABC, abstractmethod +from io import IOBase, TextIOBase +from pathlib import Path +from typing import IO, Any, BinaryIO, Callable, Literal, TextIO, overload + +from autogpt.core.configuration.schema import SystemConfiguration + +logger = logging.getLogger(__name__) + + +class FileStorageConfiguration(SystemConfiguration): + restrict_to_root: bool = True + root: Path = Path("/") + + +class FileStorage(ABC): + """A class that represents a file storage.""" + + on_write_file: Callable[[Path], Any] | None = None + """ + Event hook, executed after writing a file. + + Params: + Path: The path of the file that was written, relative to the storage root. 
+ """ + + @property + @abstractmethod + def root(self) -> Path: + """The root path of the file storage.""" + + @property + @abstractmethod + def restrict_to_root(self) -> bool: + """Whether to restrict file access to within the storage's root path.""" + + @property + @abstractmethod + def is_local(self) -> bool: + """Whether the storage is local (i.e. on the same machine, not cloud-based).""" + + @abstractmethod + def initialize(self) -> None: + """ + Calling `initialize()` should bring the storage to a ready-to-use state. + For example, it can create the resource in which files will be stored, if it + doesn't exist yet. E.g. a folder on disk, or an S3 Bucket. + """ + + @overload + @abstractmethod + def open_file( + self, + path: str | Path, + mode: Literal["w", "r"] = "r", + binary: Literal[False] = False, + ) -> TextIO | TextIOBase: + """Returns a readable text file-like object representing the file.""" + + @overload + @abstractmethod + def open_file( + self, + path: str | Path, + mode: Literal["w", "r"] = "r", + binary: Literal[True] = True, + ) -> BinaryIO | IOBase: + """Returns a readable binary file-like object representing the file.""" + + @abstractmethod + def open_file( + self, path: str | Path, mode: Literal["w", "r"] = "r", binary: bool = False + ) -> IO | IOBase: + """Returns a readable file-like object representing the file.""" + + @overload + @abstractmethod + def read_file(self, path: str | Path, binary: Literal[False] = False) -> str: + """Read a file in the storage as text.""" + ... + + @overload + @abstractmethod + def read_file(self, path: str | Path, binary: Literal[True] = True) -> bytes: + """Read a file in the storage as binary.""" + ... 
+ + @abstractmethod + def read_file(self, path: str | Path, binary: bool = False) -> str | bytes: + """Read a file in the storage.""" + + @abstractmethod + async def write_file(self, path: str | Path, content: str | bytes) -> None: + """Write to a file in the storage.""" + + @abstractmethod + def list_files(self, path: str | Path = ".") -> list[Path]: + """List all files (recursively) in a directory in the storage.""" + + @abstractmethod + def list_folders( + self, path: str | Path = ".", recursive: bool = False + ) -> list[Path]: + """List all folders in a directory in the storage.""" + + @abstractmethod + def delete_file(self, path: str | Path) -> None: + """Delete a file in the storage.""" + + @abstractmethod + def delete_dir(self, path: str | Path) -> None: + """Delete an empty folder in the storage.""" + + @abstractmethod + def exists(self, path: str | Path) -> bool: + """Check if a file or folder exists in the storage.""" + + @abstractmethod + def rename(self, old_path: str | Path, new_path: str | Path) -> None: + """Rename a file or folder in the storage.""" + + @abstractmethod + def copy(self, source: str | Path, destination: str | Path) -> None: + """Copy a file or folder with all contents in the storage.""" + + @abstractmethod + def make_dir(self, path: str | Path) -> None: + """Create a directory in the storage if doesn't exist.""" + + @abstractmethod + def clone_with_subroot(self, subroot: str | Path) -> FileStorage: + """Create a new FileStorage with a subroot of the current storage.""" + + def get_path(self, relative_path: str | Path) -> Path: + """Get the full path for an item in the storage. + + Parameters: + relative_path: The relative path to resolve in the storage. + + Returns: + Path: The resolved path relative to the storage. + """ + return self._sanitize_path(relative_path) + + def _sanitize_path( + self, + path: str | Path, + ) -> Path: + """Resolve the relative path within the given root if possible. 
+ + Parameters: + relative_path: The relative path to resolve. + + Returns: + Path: The resolved path. + + Raises: + ValueError: If the path is absolute and a root is provided. + ValueError: If the path is outside the root and the root is restricted. + """ + + # Posix systems disallow null bytes in paths. Windows is agnostic about it. + # Do an explicit check here for all sorts of null byte representations. + if "\0" in str(path): + raise ValueError("Embedded null byte") + + logger.debug(f"Resolving path '{path}' in storage '{self.root}'") + + relative_path = Path(path) + + # Allow absolute paths if they are contained in the storage. + if ( + relative_path.is_absolute() + and self.restrict_to_root + and not relative_path.is_relative_to(self.root) + ): + raise ValueError( + f"Attempted to access absolute path '{relative_path}' " + f"in storage '{self.root}'" + ) + + full_path = self.root / relative_path + if self.is_local: + full_path = full_path.resolve() + else: + full_path = Path(os.path.normpath(full_path)) + + logger.debug(f"Joined paths as '{full_path}'") + + if self.restrict_to_root and not full_path.is_relative_to(self.root): + raise ValueError( + f"Attempted to access path '{full_path}' " + f"outside of storage '{self.root}'." + ) + + return full_path diff --git a/autogpts/autogpt/autogpt/file_storage/gcs.py b/autogpts/autogpt/autogpt/file_storage/gcs.py new file mode 100644 index 000000000000..45545d4495a7 --- /dev/null +++ b/autogpts/autogpt/autogpt/file_storage/gcs.py @@ -0,0 +1,213 @@ +""" +The GCSWorkspace class provides an interface for interacting with a file workspace, and +stores the files in a Google Cloud Storage bucket. 
+""" + +from __future__ import annotations + +import inspect +import logging +from io import IOBase +from pathlib import Path +from typing import Literal + +from google.cloud import storage +from google.cloud.exceptions import NotFound + +from autogpt.core.configuration.schema import UserConfigurable + +from .base import FileStorage, FileStorageConfiguration + +logger = logging.getLogger(__name__) + + +class GCSFileStorageConfiguration(FileStorageConfiguration): + bucket: str = UserConfigurable("autogpt", from_env="STORAGE_BUCKET") + + +class GCSFileStorage(FileStorage): + """A class that represents a Google Cloud Storage.""" + + _bucket: storage.Bucket + + def __init__(self, config: GCSFileStorageConfiguration): + self._bucket_name = config.bucket + self._root = config.root + # Add / at the beginning of the root path + if not self._root.is_absolute(): + self._root = Path("/").joinpath(self._root) + + self._gcs = storage.Client() + super().__init__() + + @property + def root(self) -> Path: + """The root directory of the file storage.""" + return self._root + + @property + def restrict_to_root(self) -> bool: + """Whether to restrict generated paths to the root.""" + return True + + @property + def is_local(self) -> bool: + """Whether the storage is local (i.e. 
on the same machine, not cloud-based).""" + return False + + def initialize(self) -> None: + logger.debug(f"Initializing {repr(self)}...") + try: + self._bucket = self._gcs.get_bucket(self._bucket_name) + except NotFound: + logger.info(f"Bucket '{self._bucket_name}' does not exist; creating it...") + self._bucket = self._gcs.create_bucket(self._bucket_name) + + def get_path(self, relative_path: str | Path) -> Path: + # We set GCS root with "/" at the beginning + # but relative_to("/") will remove it + # because we don't actually want it in the storage filenames + return super().get_path(relative_path).relative_to("/") + + def _get_blob(self, path: str | Path) -> storage.Blob: + path = self.get_path(path) + return self._bucket.blob(str(path)) + + def open_file( + self, path: str | Path, mode: Literal["w", "r"] = "r", binary: bool = False + ) -> IOBase: + """Open a file in the storage.""" + blob = self._get_blob(path) + blob.reload() # pin revision number to prevent version mixing while reading + return blob.open(f"{mode}b" if binary else mode) + + def read_file(self, path: str | Path, binary: bool = False) -> str | bytes: + """Read a file in the storage.""" + return self.open_file(path, "r", binary).read() + + async def write_file(self, path: str | Path, content: str | bytes) -> None: + """Write to a file in the storage.""" + blob = self._get_blob(path) + + blob.upload_from_string( + data=content, + content_type=( + "text/plain" + if type(content) is str + # TODO: get MIME type from file extension or binary content + else "application/octet-stream" + ), + ) + + if self.on_write_file: + path = Path(path) + if path.is_absolute(): + path = path.relative_to(self.root) + res = self.on_write_file(path) + if inspect.isawaitable(res): + await res + + def list_files(self, path: str | Path = ".") -> list[Path]: + """List all files (recursively) in a directory in the storage.""" + path = self.get_path(path) + return [ + Path(blob.name).relative_to(path) + for blob in 
self._bucket.list_blobs( + prefix=f"{path}/" if path != Path(".") else None + ) + ] + + def list_folders( + self, path: str | Path = ".", recursive: bool = False + ) -> list[Path]: + """List 'directories' directly in a given path or recursively in the storage.""" + path = self.get_path(path) + folder_names = set() + + # List objects with the specified prefix and delimiter + for blob in self._bucket.list_blobs(prefix=path): + # Remove path prefix and the object name (last part) + folder = Path(blob.name).relative_to(path).parent + if not folder or folder == Path("."): + continue + # For non-recursive, only add the first level of folders + if not recursive: + folder_names.add(folder.parts[0]) + else: + # For recursive, need to add all nested folders + for i in range(len(folder.parts)): + folder_names.add("/".join(folder.parts[: i + 1])) + + return [Path(f) for f in folder_names] + + def delete_file(self, path: str | Path) -> None: + """Delete a file in the storage.""" + path = self.get_path(path) + blob = self._bucket.blob(str(path)) + blob.delete() + + def delete_dir(self, path: str | Path) -> None: + """Delete an empty folder in the storage.""" + # Since GCS does not have directories, we don't need to do anything + pass + + def exists(self, path: str | Path) -> bool: + """Check if a file or folder exists in GCS storage.""" + path = self.get_path(path) + # Check for exact blob match (file) + blob = self._bucket.blob(str(path)) + if blob.exists(): + return True + # Check for any blobs with prefix (folder) + prefix = f"{str(path).rstrip('/')}/" + blobs = self._bucket.list_blobs(prefix=prefix, max_results=1) + return next(blobs, None) is not None + + def make_dir(self, path: str | Path) -> None: + """Create a directory in the storage if doesn't exist.""" + # GCS does not have directories, so we don't need to do anything + pass + + def rename(self, old_path: str | Path, new_path: str | Path) -> None: + """Rename a file or folder in the storage.""" + old_path = 
self.get_path(old_path) + new_path = self.get_path(new_path) + blob = self._bucket.blob(str(old_path)) + # If the blob with exact name exists, rename it + if blob.exists(): + self._bucket.rename_blob(blob, new_name=str(new_path)) + return + # Otherwise, rename all blobs with the prefix (folder) + for blob in self._bucket.list_blobs(prefix=f"{old_path}/"): + new_name = str(blob.name).replace(str(old_path), str(new_path), 1) + self._bucket.rename_blob(blob, new_name=new_name) + + def copy(self, source: str | Path, destination: str | Path) -> None: + """Copy a file or folder with all contents in the storage.""" + source = self.get_path(source) + destination = self.get_path(destination) + # If the source is a file, copy it + if self._bucket.blob(str(source)).exists(): + self._bucket.copy_blob( + self._bucket.blob(str(source)), self._bucket, str(destination) + ) + return + # Otherwise, copy all blobs with the prefix (folder) + for blob in self._bucket.list_blobs(prefix=f"{source}/"): + new_name = str(blob.name).replace(str(source), str(destination), 1) + self._bucket.copy_blob(blob, self._bucket, new_name) + + def clone_with_subroot(self, subroot: str | Path) -> GCSFileStorage: + """Create a new GCSFileStorage with a subroot of the current storage.""" + file_storage = GCSFileStorage( + GCSFileStorageConfiguration( + root=Path("/").joinpath(self.get_path(subroot)), + bucket=self._bucket_name, + ) + ) + file_storage._gcs = self._gcs + file_storage._bucket = self._bucket + return file_storage + + def __repr__(self) -> str: + return f"{__class__.__name__}(bucket='{self._bucket_name}', root={self._root})" diff --git a/autogpts/autogpt/autogpt/file_storage/local.py b/autogpts/autogpt/autogpt/file_storage/local.py new file mode 100644 index 000000000000..3a52bd572499 --- /dev/null +++ b/autogpts/autogpt/autogpt/file_storage/local.py @@ -0,0 +1,139 @@ +""" +The LocalFileStorage class implements a FileStorage that works with local files. 
+""" + +from __future__ import annotations + +import inspect +import logging +from pathlib import Path +from typing import IO, Literal + +from .base import FileStorage, FileStorageConfiguration + +logger = logging.getLogger(__name__) + + +class LocalFileStorage(FileStorage): + """A class that represents a file storage.""" + + def __init__(self, config: FileStorageConfiguration): + self._root = config.root.resolve() + self._restrict_to_root = config.restrict_to_root + self.make_dir(self.root) + super().__init__() + + @property + def root(self) -> Path: + """The root directory of the file storage.""" + return self._root + + @property + def restrict_to_root(self) -> bool: + """Whether to restrict generated paths to the root.""" + return self._restrict_to_root + + @property + def is_local(self) -> bool: + """Whether the storage is local (i.e. on the same machine, not cloud-based).""" + return True + + def initialize(self) -> None: + self.root.mkdir(exist_ok=True, parents=True) + + def open_file( + self, path: str | Path, mode: Literal["w", "r"] = "r", binary: bool = False + ) -> IO: + """Open a file in the storage.""" + return self._open_file(path, f"{mode}b" if binary else mode) + + def _open_file(self, path: str | Path, mode: str) -> IO: + full_path = self.get_path(path) + return open(full_path, mode) # type: ignore + + def read_file(self, path: str | Path, binary: bool = False) -> str | bytes: + """Read a file in the storage.""" + with self._open_file(path, "rb" if binary else "r") as file: + return file.read() + + async def write_file(self, path: str | Path, content: str | bytes) -> None: + """Write to a file in the storage.""" + with self._open_file(path, "wb" if type(content) is bytes else "w") as file: + file.write(content) + + if self.on_write_file: + path = Path(path) + if path.is_absolute(): + path = path.relative_to(self.root) + res = self.on_write_file(path) + if inspect.isawaitable(res): + await res + + def list_files(self, path: str | Path = ".") -> 
list[Path]: + """List all files (recursively) in a directory in the storage.""" + path = self.get_path(path) + return [file.relative_to(path) for file in path.rglob("*") if file.is_file()] + + def list_folders( + self, path: str | Path = ".", recursive: bool = False + ) -> list[Path]: + """List directories directly in a given path or recursively.""" + path = self.get_path(path) + if recursive: + return [ + folder.relative_to(path) + for folder in path.rglob("*") + if folder.is_dir() + ] + else: + return [ + folder.relative_to(path) for folder in path.iterdir() if folder.is_dir() + ] + + def delete_file(self, path: str | Path) -> None: + """Delete a file in the storage.""" + full_path = self.get_path(path) + full_path.unlink() + + def delete_dir(self, path: str | Path) -> None: + """Delete an empty folder in the storage.""" + full_path = self.get_path(path) + full_path.rmdir() + + def exists(self, path: str | Path) -> bool: + """Check if a file or folder exists in the storage.""" + return self.get_path(path).exists() + + def make_dir(self, path: str | Path) -> None: + """Create a directory in the storage if doesn't exist.""" + full_path = self.get_path(path) + full_path.mkdir(exist_ok=True, parents=True) + + def rename(self, old_path: str | Path, new_path: str | Path) -> None: + """Rename a file or folder in the storage.""" + old_path = self.get_path(old_path) + new_path = self.get_path(new_path) + old_path.rename(new_path) + + def copy(self, source: str | Path, destination: str | Path) -> None: + """Copy a file or folder with all contents in the storage.""" + source = self.get_path(source) + destination = self.get_path(destination) + if source.is_file(): + destination.write_bytes(source.read_bytes()) + else: + destination.mkdir(exist_ok=True, parents=True) + for file in source.rglob("*"): + if file.is_file(): + target = destination / file.relative_to(source) + target.parent.mkdir(exist_ok=True, parents=True) + target.write_bytes(file.read_bytes()) + + def 
clone_with_subroot(self, subroot: str | Path) -> FileStorage: + """Create a new LocalFileStorage with a subroot of the current storage.""" + return LocalFileStorage( + FileStorageConfiguration( + root=self.get_path(subroot), + restrict_to_root=self.restrict_to_root, + ) + ) diff --git a/autogpts/autogpt/autogpt/file_storage/s3.py b/autogpts/autogpt/autogpt/file_storage/s3.py new file mode 100644 index 000000000000..f8ac15fe70c1 --- /dev/null +++ b/autogpts/autogpt/autogpt/file_storage/s3.py @@ -0,0 +1,265 @@ +""" +The S3Workspace class provides an interface for interacting with a file workspace, and +stores the files in an S3 bucket. +""" + +from __future__ import annotations + +import contextlib +import inspect +import logging +from io import IOBase, TextIOWrapper +from pathlib import Path +from typing import TYPE_CHECKING, Literal, Optional + +import boto3 +import botocore.exceptions +from pydantic import SecretStr + +from autogpt.core.configuration.schema import UserConfigurable + +from .base import FileStorage, FileStorageConfiguration + +if TYPE_CHECKING: + import mypy_boto3_s3 + +logger = logging.getLogger(__name__) + + +class S3FileStorageConfiguration(FileStorageConfiguration): + bucket: str = UserConfigurable("autogpt", from_env="STORAGE_BUCKET") + s3_endpoint_url: Optional[SecretStr] = UserConfigurable(from_env="S3_ENDPOINT_URL") + + +class S3FileStorage(FileStorage): + """A class that represents an S3 storage.""" + + _bucket: mypy_boto3_s3.service_resource.Bucket + + def __init__(self, config: S3FileStorageConfiguration): + self._bucket_name = config.bucket + self._root = config.root + # Add / at the beginning of the root path + if not self._root.is_absolute(): + self._root = Path("/").joinpath(self._root) + + # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html + self._s3 = boto3.resource( + "s3", + endpoint_url=( + config.s3_endpoint_url.get_secret_value() + if config.s3_endpoint_url + else None + ), + ) + + 
super().__init__() + + @property + def root(self) -> Path: + """The root directory of the file storage.""" + return self._root + + @property + def restrict_to_root(self): + """Whether to restrict generated paths to the root.""" + return True + + @property + def is_local(self) -> bool: + """Whether the storage is local (i.e. on the same machine, not cloud-based).""" + return False + + def initialize(self) -> None: + logger.debug(f"Initializing {repr(self)}...") + try: + self._s3.meta.client.head_bucket(Bucket=self._bucket_name) + self._bucket = self._s3.Bucket(self._bucket_name) + except botocore.exceptions.ClientError as e: + if "(404)" not in str(e): + raise + logger.info(f"Bucket '{self._bucket_name}' does not exist; creating it...") + self._bucket = self._s3.create_bucket(Bucket=self._bucket_name) + + def get_path(self, relative_path: str | Path) -> Path: + # We set S3 root with "/" at the beginning + # but relative_to("/") will remove it + # because we don't actually want it in the storage filenames + return super().get_path(relative_path).relative_to("/") + + def _get_obj(self, path: str | Path) -> mypy_boto3_s3.service_resource.Object: + """Get an S3 object.""" + path = self.get_path(path) + obj = self._bucket.Object(str(path)) + with contextlib.suppress(botocore.exceptions.ClientError): + obj.load() + return obj + + def open_file( + self, path: str | Path, mode: Literal["w", "r"] = "r", binary: bool = False + ) -> IOBase: + """Open a file in the storage.""" + obj = self._get_obj(path) + return obj.get()["Body"] if binary else TextIOWrapper(obj.get()["Body"]) + + def read_file(self, path: str | Path, binary: bool = False) -> str | bytes: + """Read a file in the storage.""" + return self.open_file(path, binary=binary).read() + + async def write_file(self, path: str | Path, content: str | bytes) -> None: + """Write to a file in the storage.""" + obj = self._get_obj(path) + obj.put(Body=content) + + if self.on_write_file: + path = Path(path) + if 
path.is_absolute(): + path = path.relative_to(self.root) + res = self.on_write_file(path) + if inspect.isawaitable(res): + await res + + def list_files(self, path: str | Path = ".") -> list[Path]: + """List all files (recursively) in a directory in the storage.""" + path = self.get_path(path) + if path == Path("."): # root level of bucket + return [Path(obj.key) for obj in self._bucket.objects.all()] + else: + return [ + Path(obj.key).relative_to(path) + for obj in self._bucket.objects.filter(Prefix=f"{path}/") + ] + + def list_folders( + self, path: str | Path = ".", recursive: bool = False + ) -> list[Path]: + """List 'directories' directly in a given path or recursively in the storage.""" + path = self.get_path(path) + folder_names = set() + + # List objects with the specified prefix and delimiter + for obj_summary in self._bucket.objects.filter(Prefix=str(path)): + # Remove path prefix and the object name (last part) + folder = Path(obj_summary.key).relative_to(path).parent + if not folder or folder == Path("."): + continue + # For non-recursive, only add the first level of folders + if not recursive: + folder_names.add(folder.parts[0]) + else: + # For recursive, need to add all nested folders + for i in range(len(folder.parts)): + folder_names.add("/".join(folder.parts[: i + 1])) + + return [Path(f) for f in folder_names] + + def delete_file(self, path: str | Path) -> None: + """Delete a file in the storage.""" + path = self.get_path(path) + obj = self._s3.Object(self._bucket_name, str(path)) + obj.delete() + + def delete_dir(self, path: str | Path) -> None: + """Delete an empty folder in the storage.""" + # S3 does not have directories, so we don't need to do anything + pass + + def exists(self, path: str | Path) -> bool: + """Check if a file or folder exists in S3 storage.""" + path = self.get_path(path) + try: + # Check for exact object match (file) + self._s3.meta.client.head_object(Bucket=self._bucket_name, Key=str(path)) + return True + except 
botocore.exceptions.ClientError as e: + if int(e.response["ResponseMetadata"]["HTTPStatusCode"]) == 404: + # If the object does not exist, + # check for objects with the prefix (folder) + prefix = f"{str(path).rstrip('/')}/" + objs = list(self._bucket.objects.filter(Prefix=prefix, MaxKeys=1)) + return len(objs) > 0 # True if any objects exist with the prefix + else: + raise # Re-raise for any other client errors + + def make_dir(self, path: str | Path) -> None: + """Create a directory in the storage if doesn't exist.""" + # S3 does not have directories, so we don't need to do anything + pass + + def rename(self, old_path: str | Path, new_path: str | Path) -> None: + """Rename a file or folder in the storage.""" + old_path = str(self.get_path(old_path)) + new_path = str(self.get_path(new_path)) + + try: + # If file exists, rename it + self._s3.meta.client.head_object(Bucket=self._bucket_name, Key=old_path) + self._s3.meta.client.copy_object( + CopySource={"Bucket": self._bucket_name, "Key": old_path}, + Bucket=self._bucket_name, + Key=new_path, + ) + self._s3.meta.client.delete_object(Bucket=self._bucket_name, Key=old_path) + except botocore.exceptions.ClientError as e: + if int(e.response["ResponseMetadata"]["HTTPStatusCode"]) == 404: + # If the object does not exist, + # it may be a folder + prefix = f"{old_path.rstrip('/')}/" + objs = list(self._bucket.objects.filter(Prefix=prefix)) + for obj in objs: + new_key = new_path + obj.key[len(old_path) :] + self._s3.meta.client.copy_object( + CopySource={"Bucket": self._bucket_name, "Key": obj.key}, + Bucket=self._bucket_name, + Key=new_key, + ) + self._s3.meta.client.delete_object( + Bucket=self._bucket_name, Key=obj.key + ) + else: + raise # Re-raise for any other client errors + + def copy(self, source: str | Path, destination: str | Path) -> None: + """Copy a file or folder with all contents in the storage.""" + source = str(self.get_path(source)) + destination = str(self.get_path(destination)) + + try: + # If 
source is a file, copy it + self._s3.meta.client.head_object(Bucket=self._bucket_name, Key=source) + self._s3.meta.client.copy_object( + CopySource={"Bucket": self._bucket_name, "Key": source}, + Bucket=self._bucket_name, + Key=destination, + ) + except botocore.exceptions.ClientError as e: + if int(e.response["ResponseMetadata"]["HTTPStatusCode"]) == 404: + # If the object does not exist, + # it may be a folder + prefix = f"{source.rstrip('/')}/" + objs = list(self._bucket.objects.filter(Prefix=prefix)) + for obj in objs: + new_key = destination + obj.key[len(source) :] + self._s3.meta.client.copy_object( + CopySource={"Bucket": self._bucket_name, "Key": obj.key}, + Bucket=self._bucket_name, + Key=new_key, + ) + else: + raise + + def clone_with_subroot(self, subroot: str | Path) -> S3FileStorage: + """Create a new S3FileStorage with a subroot of the current storage.""" + file_storage = S3FileStorage( + S3FileStorageConfiguration( + bucket=self._bucket_name, + root=Path("/").joinpath(self.get_path(subroot)), + s3_endpoint_url=self._s3.meta.client.meta.endpoint_url, + ) + ) + file_storage._s3 = self._s3 + file_storage._bucket = self._bucket + return file_storage + + def __repr__(self) -> str: + return f"{__class__.__name__}(bucket='{self._bucket_name}', root={self._root})" diff --git a/autogpts/autogpt/autogpt/llm/providers/__init__.py b/autogpts/autogpt/autogpt/llm/providers/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/autogpt/llm/providers/openai.py b/autogpts/autogpt/autogpt/llm/providers/openai.py new file mode 100644 index 000000000000..aac24ffc8837 --- /dev/null +++ b/autogpts/autogpt/autogpt/llm/providers/openai.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +import logging +from typing import Callable, Iterable, TypeVar + +from autogpt.core.resource.model_providers import CompletionModelFunction +from autogpt.models.command import Command + +logger = logging.getLogger(__name__) + + +T = 
TypeVar("T", bound=Callable) + + +def get_openai_command_specs( + commands: Iterable[Command], +) -> list[CompletionModelFunction]: + """Get OpenAI-consumable function specs for the agent's available commands. + see https://platform.openai.com/docs/guides/gpt/function-calling + """ + return [ + CompletionModelFunction( + name=command.name, + description=command.description, + parameters={param.name: param.spec for param in command.parameters}, + ) + for command in commands + ] diff --git a/autogpts/autogpt/autogpt/logs/__init__.py b/autogpts/autogpt/autogpt/logs/__init__.py new file mode 100644 index 000000000000..dc99649e5066 --- /dev/null +++ b/autogpts/autogpt/autogpt/logs/__init__.py @@ -0,0 +1,26 @@ +from .config import configure_chat_plugins, configure_logging +from .helpers import user_friendly_output +from .log_cycle import ( + CURRENT_CONTEXT_FILE_NAME, + NEXT_ACTION_FILE_NAME, + PROMPT_SUMMARY_FILE_NAME, + PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME, + SUMMARY_FILE_NAME, + SUPERVISOR_FEEDBACK_FILE_NAME, + USER_INPUT_FILE_NAME, + LogCycleHandler, +) + +__all__ = [ + "configure_logging", + "configure_chat_plugins", + "user_friendly_output", + "CURRENT_CONTEXT_FILE_NAME", + "NEXT_ACTION_FILE_NAME", + "PROMPT_SUMMARY_FILE_NAME", + "PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME", + "SUMMARY_FILE_NAME", + "SUPERVISOR_FEEDBACK_FILE_NAME", + "USER_INPUT_FILE_NAME", + "LogCycleHandler", +] diff --git a/autogpts/autogpt/autogpt/logs/config.py b/autogpts/autogpt/autogpt/logs/config.py new file mode 100644 index 000000000000..3b6860e8fb20 --- /dev/null +++ b/autogpts/autogpt/autogpt/logs/config.py @@ -0,0 +1,240 @@ +"""Logging module for Auto-GPT.""" +from __future__ import annotations + +import enum +import logging +import os +import sys +from pathlib import Path +from typing import TYPE_CHECKING, Optional + +from auto_gpt_plugin_template import AutoGPTPluginTemplate +from openai._base_client import log as openai_logger + +if TYPE_CHECKING: + from autogpt.config import Config + 
from autogpt.speech import TTSConfig + +from autogpt.core.configuration import SystemConfiguration, UserConfigurable +from autogpt.core.runner.client_lib.logging import BelowLevelFilter + +from .formatters import AutoGptFormatter, StructuredLoggingFormatter +from .handlers import TTSHandler, TypingConsoleHandler + +LOG_DIR = Path(__file__).parent.parent.parent / "logs" +LOG_FILE = "activity.log" +DEBUG_LOG_FILE = "debug.log" +ERROR_LOG_FILE = "error.log" + +SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(title)s%(message)s" +DEBUG_LOG_FORMAT = ( + "%(asctime)s %(levelname)s %(filename)s:%(lineno)d" " %(title)s%(message)s" +) + +SPEECH_OUTPUT_LOGGER = "VOICE" +USER_FRIENDLY_OUTPUT_LOGGER = "USER_FRIENDLY_OUTPUT" + +_chat_plugins: list[AutoGPTPluginTemplate] = [] + + +class LogFormatName(str, enum.Enum): + SIMPLE = "simple" + DEBUG = "debug" + STRUCTURED = "structured_google_cloud" + + +TEXT_LOG_FORMAT_MAP = { + LogFormatName.DEBUG: DEBUG_LOG_FORMAT, + LogFormatName.SIMPLE: SIMPLE_LOG_FORMAT, +} + + +class LoggingConfig(SystemConfiguration): + level: int = UserConfigurable( + default=logging.INFO, + from_env=lambda: logging.getLevelName(os.getenv("LOG_LEVEL", "INFO")), + ) + + # Console output + log_format: LogFormatName = UserConfigurable( + default=LogFormatName.SIMPLE, from_env="LOG_FORMAT" + ) + plain_console_output: bool = UserConfigurable( + default=False, + from_env=lambda: os.getenv("PLAIN_OUTPUT", "False") == "True", + ) + + # File output + log_dir: Path = LOG_DIR + log_file_format: Optional[LogFormatName] = UserConfigurable( + default=LogFormatName.SIMPLE, + from_env=lambda: os.getenv( + "LOG_FILE_FORMAT", os.getenv("LOG_FORMAT", "simple") + ), + ) + + +def configure_logging( + debug: bool = False, + level: Optional[int | str] = None, + log_dir: Optional[Path] = None, + log_format: Optional[LogFormatName | str] = None, + log_file_format: Optional[LogFormatName | str] = None, + plain_console_output: Optional[bool] = None, + config: Optional[LoggingConfig] = 
None, + tts_config: Optional[TTSConfig] = None, +) -> None: + """Configure the native logging module, based on the environment config and any + specified overrides. + + Arguments override values specified in the environment. + Overrides are also applied to `config`, if passed. + + Should be usable as `configure_logging(**config.logging.dict())`, where + `config.logging` is a `LoggingConfig` object. + """ + if debug and level: + raise ValueError("Only one of either 'debug' and 'level' arguments may be set") + + # Parse arguments + if isinstance(level, str): + if type(_level := logging.getLevelName(level.upper())) is int: + level = _level + else: + raise ValueError(f"Unknown log level '{level}'") + if isinstance(log_format, str): + if log_format in LogFormatName._value2member_map_: + log_format = LogFormatName(log_format) + elif not isinstance(log_format, LogFormatName): + raise ValueError(f"Unknown log format '{log_format}'") + if isinstance(log_file_format, str): + if log_file_format in LogFormatName._value2member_map_: + log_file_format = LogFormatName(log_file_format) + elif not isinstance(log_file_format, LogFormatName): + raise ValueError(f"Unknown log format '{log_format}'") + + config = config or LoggingConfig.from_env() + + # Aggregate env config + arguments + config.level = logging.DEBUG if debug else level or config.level + config.log_dir = log_dir or config.log_dir + config.log_format = log_format or ( + LogFormatName.DEBUG if debug else config.log_format + ) + config.log_file_format = log_file_format or log_format or config.log_file_format + config.plain_console_output = ( + plain_console_output + if plain_console_output is not None + else config.plain_console_output + ) + + # Structured logging is used for cloud environments, + # where logging to a file makes no sense. 
+ if config.log_format == LogFormatName.STRUCTURED: + config.plain_console_output = True + config.log_file_format = None + + # create log directory if it doesn't exist + if not config.log_dir.exists(): + config.log_dir.mkdir() + + log_handlers: list[logging.Handler] = [] + + if config.log_format in (LogFormatName.DEBUG, LogFormatName.SIMPLE): + console_format_template = TEXT_LOG_FORMAT_MAP[config.log_format] + console_formatter = AutoGptFormatter(console_format_template) + else: + console_formatter = StructuredLoggingFormatter() + console_format_template = SIMPLE_LOG_FORMAT + + # Console output handlers + stdout = logging.StreamHandler(stream=sys.stdout) + stdout.setLevel(config.level) + stdout.addFilter(BelowLevelFilter(logging.WARNING)) + stdout.setFormatter(console_formatter) + stderr = logging.StreamHandler() + stderr.setLevel(logging.WARNING) + stderr.setFormatter(console_formatter) + log_handlers += [stdout, stderr] + + # Console output handler which simulates typing + typing_console_handler = TypingConsoleHandler(stream=sys.stdout) + typing_console_handler.setLevel(logging.INFO) + typing_console_handler.setFormatter(console_formatter) + + # User friendly output logger (text + speech) + user_friendly_output_logger = logging.getLogger(USER_FRIENDLY_OUTPUT_LOGGER) + user_friendly_output_logger.setLevel(logging.INFO) + user_friendly_output_logger.addHandler( + typing_console_handler if not config.plain_console_output else stdout + ) + if tts_config: + user_friendly_output_logger.addHandler(TTSHandler(tts_config)) + user_friendly_output_logger.addHandler(stderr) + user_friendly_output_logger.propagate = False + + # File output handlers + if config.log_file_format is not None: + if config.level < logging.ERROR: + file_output_format_template = TEXT_LOG_FORMAT_MAP[config.log_file_format] + file_output_formatter = AutoGptFormatter( + file_output_format_template, no_color=True + ) + + # INFO log file handler + activity_log_handler = logging.FileHandler( + 
config.log_dir / LOG_FILE, "a", "utf-8" + ) + activity_log_handler.setLevel(config.level) + activity_log_handler.setFormatter(file_output_formatter) + log_handlers += [activity_log_handler] + user_friendly_output_logger.addHandler(activity_log_handler) + + # ERROR log file handler + error_log_handler = logging.FileHandler( + config.log_dir / ERROR_LOG_FILE, "a", "utf-8" + ) + error_log_handler.setLevel(logging.ERROR) + error_log_handler.setFormatter( + AutoGptFormatter(DEBUG_LOG_FORMAT, no_color=True) + ) + log_handlers += [error_log_handler] + user_friendly_output_logger.addHandler(error_log_handler) + + # Configure the root logger + logging.basicConfig( + format=console_format_template, + level=config.level, + handlers=log_handlers, + ) + + # Speech output + speech_output_logger = logging.getLogger(SPEECH_OUTPUT_LOGGER) + speech_output_logger.setLevel(logging.INFO) + if tts_config: + speech_output_logger.addHandler(TTSHandler(tts_config)) + speech_output_logger.propagate = False + + # JSON logger with better formatting + json_logger = logging.getLogger("JSON_LOGGER") + json_logger.setLevel(logging.DEBUG) + json_logger.propagate = False + + # Disable debug logging from OpenAI library + openai_logger.setLevel(logging.WARNING) + + +def configure_chat_plugins(config: Config) -> None: + """Configure chat plugins for use by the logging module""" + + logger = logging.getLogger(__name__) + + # Add chat plugins capable of report to logger + if config.chat_messages_enabled: + if _chat_plugins: + _chat_plugins.clear() + + for plugin in config.plugins: + if hasattr(plugin, "can_handle_report") and plugin.can_handle_report(): + logger.debug(f"Loaded plugin into logger: {plugin.__class__.__name__}") + _chat_plugins.append(plugin) diff --git a/autogpts/autogpt/autogpt/logs/filters.py b/autogpts/autogpt/autogpt/logs/filters.py new file mode 100644 index 000000000000..7a0ccd756950 --- /dev/null +++ b/autogpts/autogpt/autogpt/logs/filters.py @@ -0,0 +1,12 @@ +import logging + + 
+class BelowLevelFilter(logging.Filter): + """Filter for logging levels below a certain threshold.""" + + def __init__(self, below_level: int): + super().__init__() + self.below_level = below_level + + def filter(self, record: logging.LogRecord): + return record.levelno < self.below_level diff --git a/autogpts/autogpt/autogpt/logs/formatters.py b/autogpts/autogpt/autogpt/logs/formatters.py new file mode 100644 index 000000000000..a51112573c42 --- /dev/null +++ b/autogpts/autogpt/autogpt/logs/formatters.py @@ -0,0 +1,53 @@ +import logging + +from colorama import Style +from google.cloud.logging_v2.handlers import CloudLoggingFilter, StructuredLogHandler + +from autogpt.core.runner.client_lib.logging import FancyConsoleFormatter + +from .utils import remove_color_codes + + +class AutoGptFormatter(FancyConsoleFormatter): + def __init__(self, *args, no_color: bool = False, **kwargs): + super().__init__(*args, **kwargs) + self.no_color = no_color + + def format(self, record: logging.LogRecord) -> str: + # Make sure `msg` is a string + if not hasattr(record, "msg"): + record.msg = "" + elif not type(record.msg) is str: + record.msg = str(record.msg) + + # Strip color from the message to prevent color spoofing + if record.msg and not getattr(record, "preserve_color", False): + record.msg = remove_color_codes(record.msg) + + # Determine color for title + title = getattr(record, "title", "") + title_color = getattr(record, "title_color", "") or self.LEVEL_COLOR_MAP.get( + record.levelno, "" + ) + if title and title_color: + title = f"{title_color + Style.BRIGHT}{title}{Style.RESET_ALL}" + # Make sure record.title is set, and padded with a space if not empty + record.title = f"{title} " if title else "" + + if self.no_color: + return remove_color_codes(super().format(record)) + else: + return super().format(record) + + +class StructuredLoggingFormatter(StructuredLogHandler, logging.Formatter): + def __init__(self): + # Set up CloudLoggingFilter to add diagnostic info to the 
log records + self.cloud_logging_filter = CloudLoggingFilter() + + # Init StructuredLogHandler + super().__init__() + + def format(self, record: logging.LogRecord) -> str: + self.cloud_logging_filter.filter(record) + return super().format(record) diff --git a/autogpts/autogpt/autogpt/logs/handlers.py b/autogpts/autogpt/autogpt/logs/handlers.py new file mode 100644 index 000000000000..6d371059adaf --- /dev/null +++ b/autogpts/autogpt/autogpt/logs/handlers.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +import json +import logging +import random +import re +import time +from typing import TYPE_CHECKING + +from autogpt.logs.utils import remove_color_codes +from autogpt.speech import TextToSpeechProvider + +if TYPE_CHECKING: + from autogpt.speech import TTSConfig + + +class TypingConsoleHandler(logging.StreamHandler): + """Output stream to console using simulated typing""" + + # Typing speed settings in WPS (Words Per Second) + MIN_WPS = 25 + MAX_WPS = 100 + + def emit(self, record: logging.LogRecord) -> None: + min_typing_interval = 1 / TypingConsoleHandler.MAX_WPS + max_typing_interval = 1 / TypingConsoleHandler.MIN_WPS + + msg = self.format(record) + try: + # Split without discarding whitespace + words = re.findall(r"\S+\s*", msg) + + for i, word in enumerate(words): + self.stream.write(word) + self.flush() + if i >= len(words) - 1: + self.stream.write(self.terminator) + self.flush() + break + + interval = random.uniform(min_typing_interval, max_typing_interval) + # type faster after each word + min_typing_interval = min_typing_interval * 0.95 + max_typing_interval = max_typing_interval * 0.95 + time.sleep(interval) + except Exception: + self.handleError(record) + + +class TTSHandler(logging.Handler): + """Output messages to the configured TTS engine (if any)""" + + def __init__(self, config: TTSConfig): + super().__init__() + self.config = config + self.tts_provider = TextToSpeechProvider(config) + + def format(self, record: logging.LogRecord) -> str: + 
if getattr(record, "title", ""): + msg = f"{getattr(record, 'title')} {record.msg}" + else: + msg = f"{record.msg}" + + return remove_color_codes(msg) + + def emit(self, record: logging.LogRecord) -> None: + if not self.config.speak_mode: + return + + message = self.format(record) + self.tts_provider.say(message) + + +class JsonFileHandler(logging.FileHandler): + def format(self, record: logging.LogRecord) -> str: + record.json_data = json.loads(record.getMessage()) + return json.dumps(getattr(record, "json_data"), ensure_ascii=False, indent=4) + + def emit(self, record: logging.LogRecord) -> None: + with open(self.baseFilename, "w", encoding="utf-8") as f: + f.write(self.format(record)) diff --git a/autogpts/autogpt/autogpt/logs/helpers.py b/autogpts/autogpt/autogpt/logs/helpers.py new file mode 100644 index 000000000000..580e09a8ae0f --- /dev/null +++ b/autogpts/autogpt/autogpt/logs/helpers.py @@ -0,0 +1,70 @@ +import logging +from typing import Any, Optional + +from colorama import Fore + +from .config import SPEECH_OUTPUT_LOGGER, USER_FRIENDLY_OUTPUT_LOGGER, _chat_plugins + + +def user_friendly_output( + message: str, + level: int = logging.INFO, + title: str = "", + title_color: str = "", + preserve_message_color: bool = False, +) -> None: + """Outputs a message to the user in a user-friendly way. + + This function outputs on up to two channels: + 1. The console, in typewriter style + 2. 
Text To Speech, if configured + """ + logger = logging.getLogger(USER_FRIENDLY_OUTPUT_LOGGER) + + if _chat_plugins: + for plugin in _chat_plugins: + plugin.report(f"{title}: {message}") + + logger.log( + level, + message, + extra={ + "title": title, + "title_color": title_color, + "preserve_color": preserve_message_color, + }, + ) + + +def print_attribute( + title: str, value: Any, title_color: str = Fore.GREEN, value_color: str = "" +) -> None: + logger = logging.getLogger() + logger.info( + str(value), + extra={ + "title": f"{title.rstrip(':')}:", + "title_color": title_color, + "color": value_color, + }, + ) + + +def request_user_double_check(additionalText: Optional[str] = None) -> None: + if not additionalText: + additionalText = ( + "Please ensure you've setup and configured everything correctly. " + "Read https://docs.agpt.co/autogpt/setup/ to double check. " + "You can also create a github issue or join the discord and ask there!" + ) + + user_friendly_output( + additionalText, + level=logging.WARN, + title="DOUBLE CHECK CONFIGURATION", + preserve_message_color=True, + ) + + +def speak(message: str, level: int = logging.INFO) -> None: + logging.getLogger(SPEECH_OUTPUT_LOGGER).log(level, message) diff --git a/autogpts/autogpt/autogpt/logs/log_cycle.py b/autogpts/autogpt/autogpt/logs/log_cycle.py new file mode 100644 index 000000000000..062455fcbc0a --- /dev/null +++ b/autogpts/autogpt/autogpt/logs/log_cycle.py @@ -0,0 +1,81 @@ +import json +import os +from pathlib import Path +from typing import Any, Dict, Union + +from .config import LOG_DIR + +DEFAULT_PREFIX = "agent" +CURRENT_CONTEXT_FILE_NAME = "current_context.json" +NEXT_ACTION_FILE_NAME = "next_action.json" +PROMPT_SUMMARY_FILE_NAME = "prompt_summary.json" +SUMMARY_FILE_NAME = "summary.txt" +SUPERVISOR_FEEDBACK_FILE_NAME = "supervisor_feedback.txt" +PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME = "prompt_supervisor_feedback.json" +USER_INPUT_FILE_NAME = "user_input.txt" + + +class LogCycleHandler: + """ + A 
class for logging cycle data. + """ + + def __init__(self): + self.log_count_within_cycle = 0 + + def create_outer_directory(self, ai_name: str, created_at: str) -> Path: + if os.environ.get("OVERWRITE_DEBUG") == "1": + outer_folder_name = "auto_gpt" + else: + ai_name_short = self.get_agent_short_name(ai_name) + outer_folder_name = f"{created_at}_{ai_name_short}" + + outer_folder_path = LOG_DIR / "DEBUG" / outer_folder_name + if not outer_folder_path.exists(): + outer_folder_path.mkdir(parents=True) + + return outer_folder_path + + def get_agent_short_name(self, ai_name: str) -> str: + return ai_name[:15].rstrip() if ai_name else DEFAULT_PREFIX + + def create_inner_directory(self, outer_folder_path: Path, cycle_count: int) -> Path: + nested_folder_name = str(cycle_count).zfill(3) + nested_folder_path = outer_folder_path / nested_folder_name + if not nested_folder_path.exists(): + nested_folder_path.mkdir() + + return nested_folder_path + + def create_nested_directory( + self, ai_name: str, created_at: str, cycle_count: int + ) -> Path: + outer_folder_path = self.create_outer_directory(ai_name, created_at) + nested_folder_path = self.create_inner_directory(outer_folder_path, cycle_count) + + return nested_folder_path + + def log_cycle( + self, + ai_name: str, + created_at: str, + cycle_count: int, + data: Union[Dict[str, Any], Any], + file_name: str, + ) -> None: + """ + Log cycle data to a JSON file. + + Args: + data (Any): The data to be logged. + file_name (str): The name of the file to save the logged data. 
+ """ + cycle_log_dir = self.create_nested_directory(ai_name, created_at, cycle_count) + + json_data = json.dumps(data, ensure_ascii=False, indent=4) + log_file_path = cycle_log_dir / f"{self.log_count_within_cycle}_{file_name}" + + with open(log_file_path, "w", encoding="utf-8") as f: + f.write(json_data + "\n") + + self.log_count_within_cycle += 1 diff --git a/autogpts/autogpt/autogpt/logs/utils.py b/autogpts/autogpt/autogpt/logs/utils.py new file mode 100644 index 000000000000..d9f39af30950 --- /dev/null +++ b/autogpts/autogpt/autogpt/logs/utils.py @@ -0,0 +1,9 @@ +import re + + +def remove_color_codes(s: str) -> str: + return re.sub(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])", "", s) + + +def fmt_kwargs(kwargs: dict) -> str: + return ", ".join(f"{n}={repr(v)}" for n, v in kwargs.items()) diff --git a/autogpts/autogpt/autogpt/memory/vector/__init__.py b/autogpts/autogpt/autogpt/memory/vector/__init__.py new file mode 100644 index 000000000000..bcef31a4151b --- /dev/null +++ b/autogpts/autogpt/autogpt/memory/vector/__init__.py @@ -0,0 +1,156 @@ +from autogpt.config import Config + +from .memory_item import MemoryItem, MemoryItemFactory, MemoryItemRelevance +from .providers.base import VectorMemoryProvider as VectorMemory +from .providers.json_file import JSONFileMemory +from .providers.no_memory import NoMemory + +# List of supported memory backends +# Add a backend to this list if the import attempt is successful +supported_memory = ["json_file", "no_memory"] + +# try: +# from .providers.redis import RedisMemory + +# supported_memory.append("redis") +# except ImportError: +# RedisMemory = None + +# try: +# from .providers.pinecone import PineconeMemory + +# supported_memory.append("pinecone") +# except ImportError: +# PineconeMemory = None + +# try: +# from .providers.weaviate import WeaviateMemory + +# supported_memory.append("weaviate") +# except ImportError: +# WeaviateMemory = None + +# try: +# from .providers.milvus import MilvusMemory + +# 
supported_memory.append("milvus") +# except ImportError: +# MilvusMemory = None + + +def get_memory(config: Config) -> VectorMemory: + """ + Returns a memory object corresponding to the memory backend specified in the config. + + The type of memory object returned depends on the value of the `memory_backend` + attribute in the configuration. E.g. if `memory_backend` is set to "pinecone", a + `PineconeMemory` object is returned. If it is set to "redis", a `RedisMemory` + object is returned. + By default, a `JSONFileMemory` object is returned. + + Params: + config: A configuration object that contains information about the memory + backend to be used and other relevant parameters. + + Returns: + VectorMemory: an instance of a memory object based on the configuration provided + """ + memory = None + + match config.memory_backend: + case "json_file": + memory = JSONFileMemory(config) + + case "pinecone": + raise NotImplementedError( + "The Pinecone memory backend has been rendered incompatible by work on " + "the memory system, and was removed. Whether support will be added " + "back in the future is subject to discussion, feel free to pitch in: " + "https://github.com/Significant-Gravitas/AutoGPT/discussions/4280" + ) + # if not PineconeMemory: + # logger.warning( + # "Error: Pinecone is not installed. Please install pinecone" + # " to use Pinecone as a memory backend." + # ) + # else: + # memory = PineconeMemory(config) + # if clear: + # memory.clear() + + case "redis": + raise NotImplementedError( + "The Redis memory backend has been rendered incompatible by work on " + "the memory system, and has been removed temporarily." + ) + # if not RedisMemory: + # logger.warning( + # "Error: Redis is not installed. Please install redis-py to" + # " use Redis as a memory backend." 
+ # ) + # else: + # memory = RedisMemory(config) + + case "weaviate": + raise NotImplementedError( + "The Weaviate memory backend has been rendered incompatible by work on " + "the memory system, and was removed. Whether support will be added " + "back in the future is subject to discussion, feel free to pitch in: " + "https://github.com/Significant-Gravitas/AutoGPT/discussions/4280" + ) + # if not WeaviateMemory: + # logger.warning( + # "Error: Weaviate is not installed. Please install weaviate-client" + # " to use Weaviate as a memory backend." + # ) + # else: + # memory = WeaviateMemory(config) + + case "milvus": + raise NotImplementedError( + "The Milvus memory backend has been rendered incompatible by work on " + "the memory system, and was removed. Whether support will be added " + "back in the future is subject to discussion, feel free to pitch in: " + "https://github.com/Significant-Gravitas/AutoGPT/discussions/4280" + ) + # if not MilvusMemory: + # logger.warning( + # "Error: pymilvus sdk is not installed, but required " + # "to use Milvus or Zilliz as memory backend. " + # "Please install pymilvus." + # ) + # else: + # memory = MilvusMemory(config) + + case "no_memory": + memory = NoMemory() + + case _: + raise ValueError( + f"Unknown memory backend '{config.memory_backend}'." + " Please check your config." 
+ ) + + if memory is None: + memory = JSONFileMemory(config) + + return memory + + +def get_supported_memory_backends(): + return supported_memory + + +__all__ = [ + "get_memory", + "MemoryItem", + "MemoryItemFactory", + "MemoryItemRelevance", + "JSONFileMemory", + "NoMemory", + "VectorMemory", + # "RedisMemory", + # "PineconeMemory", + # "MilvusMemory", + # "WeaviateMemory", +] diff --git a/autogpts/autogpt/autogpt/memory/vector/memory_item.py b/autogpts/autogpt/autogpt/memory/vector/memory_item.py new file mode 100644 index 000000000000..8d03d0209b61 --- /dev/null +++ b/autogpts/autogpt/autogpt/memory/vector/memory_item.py @@ -0,0 +1,285 @@ +from __future__ import annotations + +import json +import logging +from typing import Literal + +import ftfy +import numpy as np +from pydantic import BaseModel + +from autogpt.config import Config +from autogpt.core.resource.model_providers import ( + ChatMessage, + ChatModelProvider, + EmbeddingModelProvider, +) +from autogpt.processing.text import chunk_content, split_text, summarize_text + +from .utils import Embedding, get_embedding + +logger = logging.getLogger(__name__) + +MemoryDocType = Literal["webpage", "text_file", "code_file", "agent_history"] + + +class MemoryItem(BaseModel, arbitrary_types_allowed=True): + """Memory object containing raw content as well as embeddings""" + + raw_content: str + summary: str + chunks: list[str] + chunk_summaries: list[str] + e_summary: Embedding + e_chunks: list[Embedding] + metadata: dict + + def relevance_for(self, query: str, e_query: Embedding | None = None): + return MemoryItemRelevance.of(self, query, e_query) + + def dump(self, calculate_length=False) -> str: + n_chunks = len(self.e_chunks) + return f""" +=============== MemoryItem =============== +Size: {n_chunks} chunks +Metadata: {json.dumps(self.metadata, indent=2)} +---------------- SUMMARY ----------------- +{self.summary} +------------------ RAW ------------------- +{self.raw_content} 
+========================================== +""" + + def __eq__(self, other: MemoryItem): + return ( + self.raw_content == other.raw_content + and self.chunks == other.chunks + and self.chunk_summaries == other.chunk_summaries + # Embeddings can either be list[float] or np.ndarray[float32], + # and for comparison they must be of the same type + and np.array_equal( + self.e_summary + if isinstance(self.e_summary, np.ndarray) + else np.array(self.e_summary, dtype=np.float32), + other.e_summary + if isinstance(other.e_summary, np.ndarray) + else np.array(other.e_summary, dtype=np.float32), + ) + and np.array_equal( + self.e_chunks + if isinstance(self.e_chunks[0], np.ndarray) + else [np.array(c, dtype=np.float32) for c in self.e_chunks], + other.e_chunks + if isinstance(other.e_chunks[0], np.ndarray) + else [np.array(c, dtype=np.float32) for c in other.e_chunks], + ) + ) + + +class MemoryItemFactory: + def __init__( + self, + llm_provider: ChatModelProvider, + embedding_provider: EmbeddingModelProvider, + ): + self.llm_provider = llm_provider + self.embedding_provider = embedding_provider + + async def from_text( + self, + text: str, + source_type: MemoryDocType, + config: Config, + metadata: dict = {}, + how_to_summarize: str | None = None, + question_for_summary: str | None = None, + ): + logger.debug(f"Memorizing text:\n{'-'*32}\n{text}\n{'-'*32}\n") + + # Fix encoding, e.g. 
removing unicode surrogates (see issue #778) + text = ftfy.fix_text(text) + + # FIXME: needs ModelProvider + chunks = [ + chunk + for chunk, _ in ( + split_text( + text=text, + config=config, + max_chunk_length=1000, # arbitrary, but shorter ~= better + tokenizer=self.llm_provider.get_tokenizer(config.fast_llm), + ) + if source_type != "code_file" + # TODO: chunk code based on structure/outline + else chunk_content( + content=text, + max_chunk_length=1000, + tokenizer=self.llm_provider.get_tokenizer(config.fast_llm), + ) + ) + ] + logger.debug("Chunks: " + str(chunks)) + + chunk_summaries = [ + summary + for summary, _ in [ + await summarize_text( + text=text_chunk, + instruction=how_to_summarize, + question=question_for_summary, + llm_provider=self.llm_provider, + config=config, + ) + for text_chunk in chunks + ] + ] + logger.debug("Chunk summaries: " + str(chunk_summaries)) + + e_chunks = get_embedding(chunks, config, self.embedding_provider) + + summary = ( + chunk_summaries[0] + if len(chunks) == 1 + else ( + await summarize_text( + text="\n\n".join(chunk_summaries), + instruction=how_to_summarize, + question=question_for_summary, + llm_provider=self.llm_provider, + config=config, + ) + )[0] + ) + logger.debug("Total summary: " + summary) + + # TODO: investigate search performance of weighted average vs summary + # e_average = np.average(e_chunks, axis=0, weights=[len(c) for c in chunks]) + e_summary = get_embedding(summary, config, self.embedding_provider) + + metadata["source_type"] = source_type + + return MemoryItem( + raw_content=text, + summary=summary, + chunks=chunks, + chunk_summaries=chunk_summaries, + e_summary=e_summary, + e_chunks=e_chunks, + metadata=metadata, + ) + + def from_text_file(self, content: str, path: str, config: Config): + return self.from_text(content, "text_file", config, {"location": path}) + + def from_code_file(self, content: str, path: str): + # TODO: implement tailored code memories + return self.from_text(content, "code_file", 
{"location": path}) + + def from_ai_action(self, ai_message: ChatMessage, result_message: ChatMessage): + # The result_message contains either user feedback + # or the result of the command specified in ai_message + + if ai_message.role != "assistant": + raise ValueError(f"Invalid role on 'ai_message': {ai_message.role}") + + result = ( + result_message.content + if result_message.content.startswith("Command") + else "None" + ) + user_input = ( + result_message.content + if result_message.content.startswith("Human feedback") + else "None" + ) + memory_content = ( + f"Assistant Reply: {ai_message.content}" + "\n\n" + f"Result: {result}" + "\n\n" + f"Human Feedback: {user_input}" + ) + + return self.from_text( + text=memory_content, + source_type="agent_history", + how_to_summarize=( + "if possible, also make clear the link between the command in the" + " assistant's response and the command result. " + "Do not mention the human feedback if there is none.", + ), + ) + + def from_webpage( + self, content: str, url: str, config: Config, question: str | None = None + ): + return self.from_text( + text=content, + source_type="webpage", + config=config, + metadata={"location": url}, + question_for_summary=question, + ) + + +class MemoryItemRelevance(BaseModel): + """ + Class that encapsulates memory relevance search functionality and data. + Instances contain a MemoryItem and its relevance scores for a given query. 
+ """ + + memory_item: MemoryItem + for_query: str + summary_relevance_score: float + chunk_relevance_scores: list[float] + + @staticmethod + def of( + memory_item: MemoryItem, for_query: str, e_query: Embedding | None = None + ) -> MemoryItemRelevance: + e_query = e_query if e_query is not None else get_embedding(for_query) + _, srs, crs = MemoryItemRelevance.calculate_scores(memory_item, e_query) + return MemoryItemRelevance( + for_query=for_query, + memory_item=memory_item, + summary_relevance_score=srs, + chunk_relevance_scores=crs, + ) + + @staticmethod + def calculate_scores( + memory: MemoryItem, compare_to: Embedding + ) -> tuple[float, float, list[float]]: + """ + Calculates similarity between given embedding and all embeddings of the memory + + Returns: + float: the aggregate (max) relevance score of the memory + float: the relevance score of the memory summary + list: the relevance scores of the memory chunks + """ + summary_relevance_score = np.dot(memory.e_summary, compare_to) + chunk_relevance_scores = np.dot(memory.e_chunks, compare_to).tolist() + logger.debug(f"Relevance of summary: {summary_relevance_score}") + logger.debug(f"Relevance of chunks: {chunk_relevance_scores}") + + relevance_scores = [summary_relevance_score, *chunk_relevance_scores] + logger.debug(f"Relevance scores: {relevance_scores}") + return max(relevance_scores), summary_relevance_score, chunk_relevance_scores + + @property + def score(self) -> float: + """The aggregate relevance score of the memory item for the given query""" + return max([self.summary_relevance_score, *self.chunk_relevance_scores]) + + @property + def most_relevant_chunk(self) -> tuple[str, float]: + """The most relevant chunk of the memory item + its score for the given query""" + i_relmax = np.argmax(self.chunk_relevance_scores) + return self.memory_item.chunks[i_relmax], self.chunk_relevance_scores[i_relmax] + + def __str__(self): + return ( + f"{self.memory_item.summary} ({self.summary_relevance_score}) " + 
f"{self.chunk_relevance_scores}" + ) diff --git a/autogpts/autogpt/autogpt/memory/vector/providers/__init__.py b/autogpts/autogpt/autogpt/memory/vector/providers/__init__.py new file mode 100644 index 000000000000..12a23b6000b5 --- /dev/null +++ b/autogpts/autogpt/autogpt/memory/vector/providers/__init__.py @@ -0,0 +1,7 @@ +from .json_file import JSONFileMemory +from .no_memory import NoMemory + +__all__ = [ + "JSONFileMemory", + "NoMemory", +] diff --git a/autogpts/autogpt/autogpt/memory/vector/providers/base.py b/autogpts/autogpt/autogpt/memory/vector/providers/base.py new file mode 100644 index 000000000000..8883f134603a --- /dev/null +++ b/autogpts/autogpt/autogpt/memory/vector/providers/base.py @@ -0,0 +1,79 @@ +import abc +import functools +import logging +from typing import MutableSet, Sequence + +import numpy as np + +from autogpt.config.config import Config + +from .. import MemoryItem, MemoryItemRelevance +from ..utils import Embedding, get_embedding + +logger = logging.getLogger(__name__) + + +class VectorMemoryProvider(MutableSet[MemoryItem]): + @abc.abstractmethod + def __init__(self, config: Config): + pass + + def get(self, query: str, config: Config) -> MemoryItemRelevance | None: + """ + Gets the data from the memory that is most relevant to the given query. + + Args: + query: The query used to retrieve information. + config: The config Object. + + Returns: The most relevant Memory + """ + result = self.get_relevant(query, 1, config) + return result[0] if result else None + + def get_relevant( + self, query: str, k: int, config: Config + ) -> Sequence[MemoryItemRelevance]: + """ + Returns the top-k most relevant memories for the given query + + Args: + query: the query to compare stored memories to + k: the number of relevant memories to fetch + config: The config Object. 
+ + Returns: + list[MemoryItemRelevance] containing the top [k] relevant memories + """ + if len(self) < 1: + return [] + + logger.debug( + f"Searching for {k} relevant memories for query '{query}'; " + f"{len(self)} memories in index" + ) + + relevances = self.score_memories_for_relevance(query, config) + logger.debug(f"Memory relevance scores: {[str(r) for r in relevances]}") + + # take last k items and reverse + top_k_indices = np.argsort([r.score for r in relevances])[-k:][::-1] + + return [relevances[i] for i in top_k_indices] + + def score_memories_for_relevance( + self, for_query: str, config: Config + ) -> Sequence[MemoryItemRelevance]: + """ + Returns MemoryItemRelevance for every memory in the index. + Implementations may override this function for performance purposes. + """ + e_query: Embedding = get_embedding(for_query, config) + return [m.relevance_for(for_query, e_query) for m in self] + + def get_stats(self) -> tuple[int, int]: + """ + Returns: + tuple (n_memories: int, n_chunks: int): the stats of the memory index + """ + return len(self), functools.reduce(lambda t, m: t + len(m.e_chunks), self, 0) diff --git a/autogpts/autogpt/autogpt/memory/vector/providers/json_file.py b/autogpts/autogpt/autogpt/memory/vector/providers/json_file.py new file mode 100644 index 000000000000..efab7e8f6ea8 --- /dev/null +++ b/autogpts/autogpt/autogpt/memory/vector/providers/json_file.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +import logging +from pathlib import Path +from typing import Iterator + +import orjson + +from autogpt.config import Config + +from ..memory_item import MemoryItem +from .base import VectorMemoryProvider + +logger = logging.getLogger(__name__) + + +class JSONFileMemory(VectorMemoryProvider): + """Memory backend that stores memories in a JSON file""" + + SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS + + file_path: Path + memories: list[MemoryItem] + + def __init__(self, config: Config) -> None: + 
"""Initialize a class instance + + Args: + config: Config object + + Returns: + None + """ + self.file_path = config.workspace_path / f"{config.memory_index}.json" + self.file_path.touch() + logger.debug( + f"Initialized {__class__.__name__} with index path {self.file_path}" + ) + + self.memories = [] + try: + self.load_index() + logger.debug(f"Loaded {len(self.memories)} MemoryItems from file") + except Exception as e: + logger.warning(f"Could not load MemoryItems from file: {e}") + self.save_index() + + def __iter__(self) -> Iterator[MemoryItem]: + return iter(self.memories) + + def __contains__(self, x: MemoryItem) -> bool: + return x in self.memories + + def __len__(self) -> int: + return len(self.memories) + + def add(self, item: MemoryItem): + self.memories.append(item) + logger.debug(f"Adding item to memory: {item.dump()}") + self.save_index() + return len(self.memories) + + def discard(self, item: MemoryItem): + try: + self.remove(item) + except ValueError: # item not in memory + pass + + def clear(self): + """Clears the data in memory.""" + self.memories.clear() + self.save_index() + + def load_index(self): + """Loads all memories from the index file""" + if not self.file_path.is_file(): + logger.debug(f"Index file '{self.file_path}' does not exist") + return + with self.file_path.open("r") as f: + logger.debug(f"Loading memories from index file '{self.file_path}'") + json_index = orjson.loads(f.read()) + for memory_item_dict in json_index: + self.memories.append(MemoryItem.parse_obj(memory_item_dict)) + + def save_index(self): + logger.debug(f"Saving memory index to file {self.file_path}") + with self.file_path.open("wb") as f: + return f.write( + orjson.dumps( + [m.dict() for m in self.memories], option=self.SAVE_OPTIONS + ) + ) diff --git a/autogpts/autogpt/autogpt/memory/vector/providers/no_memory.py b/autogpts/autogpt/autogpt/memory/vector/providers/no_memory.py new file mode 100644 index 000000000000..01f6c18014e3 --- /dev/null +++ 
b/autogpts/autogpt/autogpt/memory/vector/providers/no_memory.py @@ -0,0 +1,36 @@ +"""A class that does not store any data. This is the default memory provider.""" +from __future__ import annotations + +from typing import Iterator, Optional + +from autogpt.config.config import Config + +from .. import MemoryItem +from .base import VectorMemoryProvider + + +class NoMemory(VectorMemoryProvider): + """ + A class that does not store any data. This is the default memory provider. + """ + + def __init__(self, config: Optional[Config] = None): + pass + + def __iter__(self) -> Iterator[MemoryItem]: + return iter([]) + + def __contains__(self, x: MemoryItem) -> bool: + return False + + def __len__(self) -> int: + return 0 + + def add(self, item: MemoryItem): + pass + + def discard(self, item: MemoryItem): + pass + + def clear(self): + pass diff --git a/autogpts/autogpt/autogpt/memory/vector/utils.py b/autogpts/autogpt/autogpt/memory/vector/utils.py new file mode 100644 index 000000000000..b8b87a49133b --- /dev/null +++ b/autogpts/autogpt/autogpt/memory/vector/utils.py @@ -0,0 +1,98 @@ +import logging +from contextlib import suppress +from typing import Any, Sequence, overload + +import numpy as np + +from autogpt.config import Config +from autogpt.core.resource.model_providers import EmbeddingModelProvider + +logger = logging.getLogger(__name__) + +Embedding = list[float] | list[np.float32] | np.ndarray[Any, np.dtype[np.float32]] +"""Embedding vector""" + +TText = Sequence[int] +"""Tokenized text""" + + +@overload +async def get_embedding( + input: str | TText, config: Config, embedding_provider: EmbeddingModelProvider +) -> Embedding: + ... + + +@overload +async def get_embedding( + input: list[str] | list[TText], + config: Config, + embedding_provider: EmbeddingModelProvider, +) -> list[Embedding]: + ... 
+ + +async def get_embedding( + input: str | TText | list[str] | list[TText], + config: Config, + embedding_provider: EmbeddingModelProvider, +) -> Embedding | list[Embedding]: + """Get an embedding from the ada model. + + Args: + input: Input text to get embeddings for, encoded as a string or array of tokens. + Multiple inputs may be given as a list of strings or token arrays. + embedding_provider: The provider to create embeddings. + + Returns: + List[float]: The embedding. + """ + multiple = isinstance(input, list) and all(not isinstance(i, int) for i in input) + + if isinstance(input, str): + input = input.replace("\n", " ") + + with suppress(NotImplementedError): + return _get_embedding_with_plugin(input, config) + + elif multiple and isinstance(input[0], str): + input = [text.replace("\n", " ") for text in input] + + with suppress(NotImplementedError): + return [_get_embedding_with_plugin(i, config) for i in input] + + model = config.embedding_model + + logger.debug( + f"Getting embedding{f's for {len(input)} inputs' if multiple else ''}" + f" with model '{model}'" + ) + + if not multiple: + return ( + await embedding_provider.create_embedding( + text=input, + model_name=model, + embedding_parser=lambda e: e, + ) + ).embedding + else: + embeddings = [] + for text in input: + result = await embedding_provider.create_embedding( + text=text, + model_name=model, + embedding_parser=lambda e: e, + ) + embeddings.append(result.embedding) + return embeddings + + +def _get_embedding_with_plugin(text: str, config: Config) -> Embedding: + for plugin in config.plugins: + if plugin.can_handle_text_embedding(text): + embedding = plugin.handle_text_embedding(text) + if embedding is not None: + return embedding + + raise NotImplementedError diff --git a/autogpts/autogpt/autogpt/models/__init__.py b/autogpts/autogpt/autogpt/models/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/autogpt/models/action_history.py 
b/autogpts/autogpt/autogpt/models/action_history.py new file mode 100644 index 000000000000..51806fb03879 --- /dev/null +++ b/autogpts/autogpt/autogpt/models/action_history.py @@ -0,0 +1,234 @@ +from __future__ import annotations + +import asyncio +from typing import TYPE_CHECKING, Any, Iterator, Literal, Optional + +from pydantic import BaseModel, Field + +from autogpt.processing.text import summarize_text +from autogpt.prompts.utils import format_numbered_list, indent + +if TYPE_CHECKING: + from autogpt.agents.base import CommandArgs, CommandName + from autogpt.config.config import Config + from autogpt.core.resource.model_providers import ChatModelProvider + + +class Action(BaseModel): + name: str + args: dict[str, Any] + reasoning: str + + def format_call(self) -> str: + return ( + f"{self.name}" + f"({', '.join([f'{a}={repr(v)}' for a, v in self.args.items()])})" + ) + + +class ActionSuccessResult(BaseModel): + outputs: Any + status: Literal["success"] = "success" + + def __str__(self) -> str: + outputs = str(self.outputs).replace("```", r"\```") + multiline = "\n" in outputs + return f"```\n{self.outputs}\n```" if multiline else str(self.outputs) + + +class ErrorInfo(BaseModel): + args: tuple + message: str + exception_type: str + repr: str + + @staticmethod + def from_exception(exception: Exception) -> ErrorInfo: + return ErrorInfo( + args=exception.args, + message=getattr(exception, "message", exception.args[0]), + exception_type=exception.__class__.__name__, + repr=repr(exception), + ) + + def __str__(self): + return repr(self) + + def __repr__(self): + return self.repr + + +class ActionErrorResult(BaseModel): + reason: str + error: Optional[ErrorInfo] = None + status: Literal["error"] = "error" + + @staticmethod + def from_exception(exception: Exception) -> ActionErrorResult: + return ActionErrorResult( + reason=getattr(exception, "message", exception.args[0]), + error=ErrorInfo.from_exception(exception), + ) + + def __str__(self) -> str: + return 
f"Action failed: '{self.reason}'" + + +class ActionInterruptedByHuman(BaseModel): + feedback: str + status: Literal["interrupted_by_human"] = "interrupted_by_human" + + def __str__(self) -> str: + return ( + 'The user interrupted the action with the following feedback: "%s"' + % self.feedback + ) + + +ActionResult = ActionSuccessResult | ActionErrorResult | ActionInterruptedByHuman + + +class Episode(BaseModel): + action: Action + result: ActionResult | None + summary: str | None = None + + def format(self): + step = f"Executed `{self.action.format_call()}`\n" + step += f'- **Reasoning:** "{self.action.reasoning}"\n' + step += ( + "- **Status:** " + f"`{self.result.status if self.result else 'did_not_finish'}`\n" + ) + if self.result: + if self.result.status == "success": + result = str(self.result) + result = "\n" + indent(result) if "\n" in result else result + step += f"- **Output:** {result}" + elif self.result.status == "error": + step += f"- **Reason:** {self.result.reason}\n" + if self.result.error: + step += f"- **Error:** {self.result.error}\n" + elif self.result.status == "interrupted_by_human": + step += f"- **Feedback:** {self.result.feedback}\n" + return step + + def __str__(self) -> str: + executed_action = f"Executed `{self.action.format_call()}`" + action_result = f": {self.result}" if self.result else "." 
+ return executed_action + action_result + + +class EpisodicActionHistory(BaseModel): + """Utility container for an action history""" + + episodes: list[Episode] = Field(default_factory=list) + cursor: int = 0 + _lock = asyncio.Lock() + + @property + def current_episode(self) -> Episode | None: + if self.cursor == len(self): + return None + return self[self.cursor] + + def __getitem__(self, key: int) -> Episode: + return self.episodes[key] + + def __iter__(self) -> Iterator[Episode]: + return iter(self.episodes) + + def __len__(self) -> int: + return len(self.episodes) + + def __bool__(self) -> bool: + return len(self.episodes) > 0 + + def register_action(self, action: Action) -> None: + if not self.current_episode: + self.episodes.append(Episode(action=action, result=None)) + assert self.current_episode + elif self.current_episode.action: + raise ValueError("Action for current cycle already set") + + def register_result(self, result: ActionResult) -> None: + if not self.current_episode: + raise RuntimeError("Cannot register result for cycle without action") + elif self.current_episode.result: + raise ValueError("Result for current cycle already set") + + self.current_episode.result = result + self.cursor = len(self.episodes) + + def matches_last_command( + self, command_name: CommandName, arguments: CommandArgs + ) -> bool: + """Check if the last command matches the given name and arguments.""" + if len(self.episodes) > 0: + last_command = self.episodes[-1].action + return last_command.name == command_name and last_command.args == arguments + return False + + def rewind(self, number_of_episodes: int = 0) -> None: + """Resets the history to an earlier state. + + Params: + number_of_cycles (int): The number of cycles to rewind. Default is 0. + When set to 0, it will only reset the current cycle. 
+ """ + # Remove partial record of current cycle + if self.current_episode: + if self.current_episode.action and not self.current_episode.result: + self.episodes.pop(self.cursor) + + # Rewind the specified number of cycles + if number_of_episodes > 0: + self.episodes = self.episodes[:-number_of_episodes] + self.cursor = len(self.episodes) + + async def handle_compression( + self, llm_provider: ChatModelProvider, app_config: Config + ) -> None: + """Compresses each episode in the action history using an LLM. + + This method iterates over all episodes in the action history without a summary, + and generates a summary for them using an LLM. + """ + compress_instruction = ( + "The text represents an action, the reason for its execution, " + "and its result. " + "Condense the action taken and its result into one line. " + "Preserve any specific factual information gathered by the action." + ) + async with self._lock: + # Gather all episodes without a summary + episodes_to_summarize = [ep for ep in self.episodes if ep.summary is None] + + # Parallelize summarization calls + summarize_coroutines = [ + summarize_text( + episode.format(), + instruction=compress_instruction, + llm_provider=llm_provider, + config=app_config, + ) + for episode in episodes_to_summarize + ] + summaries = await asyncio.gather(*summarize_coroutines) + + # Assign summaries to episodes + for episode, (summary, _) in zip(episodes_to_summarize, summaries): + episode.summary = summary + + def fmt_list(self) -> str: + return format_numbered_list(self.episodes) + + def fmt_paragraph(self) -> str: + steps: list[str] = [] + + for i, episode in enumerate(self.episodes, 1): + step = f"### Step {i}: {episode.format()}\n" + + steps.append(step) + + return "\n\n".join(steps) diff --git a/autogpts/autogpt/autogpt/models/base_open_ai_plugin.py b/autogpts/autogpt/autogpt/models/base_open_ai_plugin.py new file mode 100644 index 000000000000..a269bb9c415f --- /dev/null +++ 
b/autogpts/autogpt/autogpt/models/base_open_ai_plugin.py @@ -0,0 +1,251 @@ +"""Handles loading of plugins.""" +from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar + +from auto_gpt_plugin_template import AutoGPTPluginTemplate + +PromptGenerator = TypeVar("PromptGenerator") + + +class Message(TypedDict): + role: str + content: str + + +class BaseOpenAIPlugin(AutoGPTPluginTemplate): + """ + This is a BaseOpenAIPlugin class for generating AutoGPT plugins. + """ + + def __init__(self, manifests_specs_clients: dict): + # super().__init__() + self._name = manifests_specs_clients["manifest"]["name_for_model"] + self._version = manifests_specs_clients["manifest"]["schema_version"] + self._description = manifests_specs_clients["manifest"]["description_for_model"] + self._client = manifests_specs_clients["client"] + self._manifest = manifests_specs_clients["manifest"] + self._openapi_spec = manifests_specs_clients["openapi_spec"] + + def can_handle_on_response(self) -> bool: + """This method is called to check that the plugin can + handle the on_response method. + Returns: + bool: True if the plugin can handle the on_response method.""" + return False + + def on_response(self, response: str, *args, **kwargs) -> str: + """This method is called when a response is received from the model.""" + return response + + def can_handle_post_prompt(self) -> bool: + """This method is called to check that the plugin can + handle the post_prompt method. + Returns: + bool: True if the plugin can handle the post_prompt method.""" + return False + + def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator: + """This method is called just after the generate_prompt is called, + but actually before the prompt is generated. + Args: + prompt (PromptGenerator): The prompt generator. + Returns: + PromptGenerator: The prompt generator. 
+ """ + return prompt + + def can_handle_on_planning(self) -> bool: + """This method is called to check that the plugin can + handle the on_planning method. + Returns: + bool: True if the plugin can handle the on_planning method.""" + return False + + def on_planning( + self, prompt: PromptGenerator, messages: List[Message] + ) -> Optional[str]: + """This method is called before the planning chat completion is done. + Args: + prompt (PromptGenerator): The prompt generator. + messages (List[str]): The list of messages. + """ + + def can_handle_post_planning(self) -> bool: + """This method is called to check that the plugin can + handle the post_planning method. + Returns: + bool: True if the plugin can handle the post_planning method.""" + return False + + def post_planning(self, response: str) -> str: + """This method is called after the planning chat completion is done. + Args: + response (str): The response. + Returns: + str: The resulting response. + """ + return response + + def can_handle_pre_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the pre_instruction method. + Returns: + bool: True if the plugin can handle the pre_instruction method.""" + return False + + def pre_instruction(self, messages: List[Message]) -> List[Message]: + """This method is called before the instruction chat is done. + Args: + messages (List[Message]): The list of context messages. + Returns: + List[Message]: The resulting list of messages. + """ + return messages + + def can_handle_on_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the on_instruction method. + Returns: + bool: True if the plugin can handle the on_instruction method.""" + return False + + def on_instruction(self, messages: List[Message]) -> Optional[str]: + """This method is called when the instruction chat is done. + Args: + messages (List[Message]): The list of context messages. 
+ Returns: + Optional[str]: The resulting message. + """ + + def can_handle_post_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the post_instruction method. + Returns: + bool: True if the plugin can handle the post_instruction method.""" + return False + + def post_instruction(self, response: str) -> str: + """This method is called after the instruction chat is done. + Args: + response (str): The response. + Returns: + str: The resulting response. + """ + return response + + def can_handle_pre_command(self) -> bool: + """This method is called to check that the plugin can + handle the pre_command method. + Returns: + bool: True if the plugin can handle the pre_command method.""" + return False + + def pre_command( + self, command_name: str, arguments: Dict[str, Any] + ) -> Tuple[str, Dict[str, Any]]: + """This method is called before the command is executed. + Args: + command_name (str): The command name. + arguments (Dict[str, Any]): The arguments. + Returns: + Tuple[str, Dict[str, Any]]: The command name and the arguments. + """ + return command_name, arguments + + def can_handle_post_command(self) -> bool: + """This method is called to check that the plugin can + handle the post_command method. + Returns: + bool: True if the plugin can handle the post_command method.""" + return False + + def post_command(self, command_name: str, response: str) -> str: + """This method is called after the command is executed. + Args: + command_name (str): The command name. + response (str): The response. + Returns: + str: The resulting response. + """ + return response + + def can_handle_chat_completion( + self, messages: Dict[Any, Any], model: str, temperature: float, max_tokens: int + ) -> bool: + """This method is called to check that the plugin can + handle the chat_completion method. + Args: + messages (List[Message]): The messages. + model (str): The model name. + temperature (float): The temperature. 
+ max_tokens (int): The max tokens. + Returns: + bool: True if the plugin can handle the chat_completion method.""" + return False + + def handle_chat_completion( + self, messages: List[Message], model: str, temperature: float, max_tokens: int + ) -> str: + """This method is called when the chat completion is done. + Args: + messages (List[Message]): The messages. + model (str): The model name. + temperature (float): The temperature. + max_tokens (int): The max tokens. + Returns: + str: The resulting response. + """ + + def can_handle_text_embedding(self, text: str) -> bool: + """This method is called to check that the plugin can + handle the text_embedding method. + + Args: + text (str): The text to be convert to embedding. + Returns: + bool: True if the plugin can handle the text_embedding method.""" + return False + + def handle_text_embedding(self, text: str) -> list[float]: + """This method is called to create a text embedding. + + Args: + text (str): The text to be convert to embedding. + Returns: + list[float]: The created embedding vector. + """ + + def can_handle_user_input(self, user_input: str) -> bool: + """This method is called to check that the plugin can + handle the user_input method. + + Args: + user_input (str): The user input. + + Returns: + bool: True if the plugin can handle the user_input method.""" + return False + + def user_input(self, user_input: str) -> str: + """This method is called to request user input to the user. + + Args: + user_input (str): The question or prompt to ask the user. + + Returns: + str: The user input. + """ + + def can_handle_report(self) -> bool: + """This method is called to check that the plugin can + handle the report method. + + Returns: + bool: True if the plugin can handle the report method.""" + return False + + def report(self, message: str) -> None: + """This method is called to report a message to the user. + + Args: + message (str): The message to report. 
+ """ diff --git a/autogpts/autogpt/autogpt/models/command.py b/autogpts/autogpt/autogpt/models/command.py new file mode 100644 index 000000000000..5472dc4aa481 --- /dev/null +++ b/autogpts/autogpt/autogpt/models/command.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +import inspect +from typing import TYPE_CHECKING, Any, Callable, Literal, Optional + +if TYPE_CHECKING: + from autogpt.agents.base import BaseAgent + from autogpt.config import Config + +from .command_parameter import CommandParameter +from .context_item import ContextItem + +CommandReturnValue = Any +CommandOutput = CommandReturnValue | tuple[CommandReturnValue, ContextItem] + + +class Command: + """A class representing a command. + + Attributes: + name (str): The name of the command. + description (str): A brief description of what the command does. + parameters (list): The parameters of the function that the command executes. + """ + + def __init__( + self, + name: str, + description: str, + method: Callable[..., CommandOutput], + parameters: list[CommandParameter], + enabled: Literal[True] | Callable[[Config], bool] = True, + disabled_reason: Optional[str] = None, + aliases: list[str] = [], + available: bool | Callable[[BaseAgent], bool] = True, + ): + self.name = name + self.description = description + self.method = method + self.parameters = parameters + self.enabled = enabled + self.disabled_reason = disabled_reason + self.aliases = aliases + self.available = available + + @property + def is_async(self) -> bool: + return inspect.iscoroutinefunction(self.method) + + def __call__(self, *args, agent: BaseAgent, **kwargs) -> Any: + if callable(self.enabled) and not self.enabled(agent.legacy_config): + if self.disabled_reason: + raise RuntimeError( + f"Command '{self.name}' is disabled: {self.disabled_reason}" + ) + raise RuntimeError(f"Command '{self.name}' is disabled") + + if not self.available or callable(self.available) and not self.available(agent): + raise RuntimeError(f"Command 
'{self.name}' is not available") + + return self.method(*args, **kwargs, agent=agent) + + def __str__(self) -> str: + params = [ + f"{param.name}: " + + ("%s" if param.spec.required else "Optional[%s]") % param.spec.type.value + for param in self.parameters + ] + return ( + f"{self.name}: {self.description.rstrip('.')}. " + f"Params: ({', '.join(params)})" + ) diff --git a/autogpts/autogpt/autogpt/models/command_parameter.py b/autogpts/autogpt/autogpt/models/command_parameter.py new file mode 100644 index 000000000000..402e870fc87b --- /dev/null +++ b/autogpts/autogpt/autogpt/models/command_parameter.py @@ -0,0 +1,17 @@ +import dataclasses + +from autogpt.core.utils.json_schema import JSONSchema + + +@dataclasses.dataclass +class CommandParameter: + name: str + spec: JSONSchema + + def __repr__(self): + return "CommandParameter('%s', '%s', '%s', %s)" % ( + self.name, + self.spec.type, + self.spec.description, + self.spec.required, + ) diff --git a/autogpts/autogpt/autogpt/models/command_registry.py b/autogpts/autogpt/autogpt/models/command_registry.py new file mode 100644 index 000000000000..ec372c9f53c1 --- /dev/null +++ b/autogpts/autogpt/autogpt/models/command_registry.py @@ -0,0 +1,212 @@ +from __future__ import annotations + +import importlib +import inspect +import logging +from dataclasses import dataclass, field +from types import ModuleType +from typing import TYPE_CHECKING, Any, Iterator + +if TYPE_CHECKING: + from autogpt.agents.base import BaseAgent + from autogpt.config import Config + + +from autogpt.command_decorator import AUTO_GPT_COMMAND_IDENTIFIER +from autogpt.models.command import Command + +logger = logging.getLogger(__name__) + + +class CommandRegistry: + """ + The CommandRegistry class is a manager for a collection of Command objects. + It allows the registration, modification, and retrieval of Command objects, + as well as the scanning and loading of command plugins from a specified + directory. 
+ """ + + commands: dict[str, Command] + commands_aliases: dict[str, Command] + + # Alternative way to structure the registry; currently redundant with self.commands + categories: dict[str, CommandCategory] + + @dataclass + class CommandCategory: + name: str + title: str + description: str + commands: list[Command] = field(default_factory=list[Command]) + modules: list[ModuleType] = field(default_factory=list[ModuleType]) + + def __init__(self): + self.commands = {} + self.commands_aliases = {} + self.categories = {} + + def __contains__(self, command_name: str): + return command_name in self.commands or command_name in self.commands_aliases + + def _import_module(self, module_name: str) -> Any: + return importlib.import_module(module_name) + + def _reload_module(self, module: Any) -> Any: + return importlib.reload(module) + + def register(self, cmd: Command) -> None: + if cmd.name in self.commands: + logger.warning( + f"Command '{cmd.name}' already registered and will be overwritten!" + ) + self.commands[cmd.name] = cmd + + if cmd.name in self.commands_aliases: + logger.warning( + f"Command '{cmd.name}' will overwrite alias with the same name of " + f"'{self.commands_aliases[cmd.name]}'!" 
+ ) + for alias in cmd.aliases: + self.commands_aliases[alias] = cmd + + def unregister(self, command: Command) -> None: + if command.name in self.commands: + del self.commands[command.name] + for alias in command.aliases: + del self.commands_aliases[alias] + else: + raise KeyError(f"Command '{command.name}' not found in registry.") + + def reload_commands(self) -> None: + """Reloads all loaded command plugins.""" + for cmd_name in self.commands: + cmd = self.commands[cmd_name] + module = self._import_module(cmd.__module__) + reloaded_module = self._reload_module(module) + if hasattr(reloaded_module, "register"): + reloaded_module.register(self) + + def get_command(self, name: str) -> Command | None: + if name in self.commands: + return self.commands[name] + + if name in self.commands_aliases: + return self.commands_aliases[name] + + def call(self, command_name: str, agent: BaseAgent, **kwargs) -> Any: + if command := self.get_command(command_name): + return command(**kwargs, agent=agent) + raise KeyError(f"Command '{command_name}' not found in registry") + + def list_available_commands(self, agent: BaseAgent) -> Iterator[Command]: + """Iterates over all registered commands and yields those that are available. + + Params: + agent (BaseAgent): The agent that the commands will be checked against. + + Yields: + Command: The next available command. + """ + + for cmd in self.commands.values(): + available = cmd.available + if callable(cmd.available): + available = cmd.available(agent) + if available: + yield cmd + + # def command_specs(self) -> str: + # """ + # Returns a technical declaration of all commands in the registry, + # for use in a prompt. + # """ + # + # Declaring functions or commands should be done in a model-specific way to + # achieve optimal results. For this reason, it should NOT be implemented here, + # but in an LLM provider module. + # MUST take command AVAILABILITY into account. 
+ + @staticmethod + def with_command_modules(modules: list[str], config: Config) -> CommandRegistry: + new_registry = CommandRegistry() + + logger.debug( + "The following command categories are disabled: " + f"{config.disabled_command_categories}" + ) + enabled_command_modules = [ + x for x in modules if x not in config.disabled_command_categories + ] + + logger.debug( + f"The following command categories are enabled: {enabled_command_modules}" + ) + + for command_module in enabled_command_modules: + new_registry.import_command_module(command_module) + + # Unregister commands that are incompatible with the current config + for command in [c for c in new_registry.commands.values()]: + if callable(command.enabled) and not command.enabled(config): + new_registry.unregister(command) + logger.debug( + f"Unregistering incompatible command '{command.name}':" + f" \"{command.disabled_reason or 'Disabled by current config.'}\"" + ) + + return new_registry + + def import_command_module(self, module_name: str) -> None: + """ + Imports the specified Python module containing command plugins. + + This method imports the associated module and registers any functions or + classes that are decorated with the `AUTO_GPT_COMMAND_IDENTIFIER` attribute + as `Command` objects. The registered `Command` objects are then added to the + `commands` dictionary of the `CommandRegistry` object. + + Args: + module_name (str): The name of the module to import for command plugins. 
+ """ + + module = importlib.import_module(module_name) + + category = self.register_module_category(module) + + for attr_name in dir(module): + attr = getattr(module, attr_name) + + command = None + + # Register decorated functions + if getattr(attr, AUTO_GPT_COMMAND_IDENTIFIER, False): + command = attr.command + + # Register command classes + elif ( + inspect.isclass(attr) and issubclass(attr, Command) and attr != Command + ): + command = attr() + + if command: + self.register(command) + category.commands.append(command) + + def register_module_category(self, module: ModuleType) -> CommandCategory: + if not (category_name := getattr(module, "COMMAND_CATEGORY", None)): + raise ValueError(f"Cannot import invalid command module {module.__name__}") + + if category_name not in self.categories: + self.categories[category_name] = CommandRegistry.CommandCategory( + name=category_name, + title=getattr( + module, "COMMAND_CATEGORY_TITLE", category_name.capitalize() + ), + description=getattr(module, "__doc__", ""), + ) + + category = self.categories[category_name] + if module not in category.modules: + category.modules.append(module) + + return category diff --git a/autogpts/autogpt/autogpt/models/context_item.py b/autogpts/autogpt/autogpt/models/context_item.py new file mode 100644 index 000000000000..a669bdcc8bba --- /dev/null +++ b/autogpts/autogpt/autogpt/models/context_item.py @@ -0,0 +1,95 @@ +import logging +import os.path +from abc import ABC, abstractmethod +from pathlib import Path +from typing import Optional + +from pydantic import BaseModel, Field + +from autogpt.commands.file_operations_utils import decode_textual_file + +logger = logging.getLogger(__name__) + + +class ContextItem(ABC): + @property + @abstractmethod + def description(self) -> str: + """Description of the context item""" + ... + + @property + @abstractmethod + def source(self) -> Optional[str]: + """A string indicating the source location of the context item""" + ... 
+ + @property + @abstractmethod + def content(self) -> str: + """The content represented by the context item""" + ... + + def fmt(self) -> str: + return ( + f"{self.description} (source: {self.source})\n" + "```\n" + f"{self.content}\n" + "```" + ) + + +class FileContextItem(BaseModel, ContextItem): + file_path_in_workspace: Path + workspace_path: Path + + @property + def file_path(self) -> Path: + return self.workspace_path / self.file_path_in_workspace + + @property + def description(self) -> str: + return f"The current content of the file '{self.file_path_in_workspace}'" + + @property + def source(self) -> str: + return str(self.file_path_in_workspace) + + @property + def content(self) -> str: + # TODO: use workspace.open_file() + with open(self.file_path, "rb") as file: + return decode_textual_file(file, os.path.splitext(file.name)[1], logger) + + +class FolderContextItem(BaseModel, ContextItem): + path_in_workspace: Path + workspace_path: Path + + @property + def path(self) -> Path: + return self.workspace_path / self.path_in_workspace + + def __post_init__(self) -> None: + assert self.path.exists(), "Selected path does not exist" + assert self.path.is_dir(), "Selected path is not a directory" + + @property + def description(self) -> str: + return f"The contents of the folder '{self.path_in_workspace}' in the workspace" + + @property + def source(self) -> str: + return str(self.path_in_workspace) + + @property + def content(self) -> str: + items = [f"{p.name}{'/' if p.is_dir() else ''}" for p in self.path.iterdir()] + items.sort() + return "\n".join(items) + + +class StaticContextItem(BaseModel, ContextItem): + item_description: str = Field(alias="description") + item_source: Optional[str] = Field(alias="source") + item_content: str = Field(alias="content") diff --git a/autogpts/autogpt/autogpt/plugins/__init__.py b/autogpts/autogpt/autogpt/plugins/__init__.py new file mode 100644 index 000000000000..618a9895ab9e --- /dev/null +++ 
b/autogpts/autogpt/autogpt/plugins/__init__.py @@ -0,0 +1,330 @@ +"""Handles loading of plugins.""" +from __future__ import annotations + +import importlib.util +import inspect +import json +import logging +import os +import zipfile +from pathlib import Path +from typing import TYPE_CHECKING, List +from urllib.parse import urlparse +from zipimport import ZipImportError, zipimporter + +import openapi_python_client +import requests +from auto_gpt_plugin_template import AutoGPTPluginTemplate +from openapi_python_client.config import Config as OpenAPIConfig + +if TYPE_CHECKING: + from autogpt.config import Config + +from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin + +logger = logging.getLogger(__name__) + + +def inspect_zip_for_modules(zip_path: str) -> list[str]: + """ + Inspect a zipfile for a modules. + + Args: + zip_path (str): Path to the zipfile. + debug (bool, optional): Enable debug logging. Defaults to False. + + Returns: + list[str]: The list of module names found or empty list if none were found. + """ + result = [] + with zipfile.ZipFile(zip_path, "r") as zfile: + for name in zfile.namelist(): + if name.endswith("__init__.py") and not name.startswith("__MACOSX"): + logger.debug(f"Found module '{name}' in the zipfile at: {name}") + result.append(name) + if len(result) == 0: + logger.debug(f"Module '__init__.py' not found in the zipfile @ {zip_path}.") + return result + + +def write_dict_to_json_file(data: dict, file_path: str) -> None: + """ + Write a dictionary to a JSON file. + Args: + data (dict): Dictionary to write. + file_path (str): Path to the file. + """ + with open(file_path, "w") as file: + json.dump(data, file, indent=4) + + +def fetch_openai_plugins_manifest_and_spec(config: Config) -> dict: + """ + Fetch the manifest for a list of OpenAI plugins. + Args: + urls (List): List of URLs to fetch. + Returns: + dict: per url dictionary of manifest and spec. 
+ """ + # TODO add directory scan + manifests = {} + for url in config.plugins_openai: + openai_plugin_client_dir = f"{config.plugins_dir}/openai/{urlparse(url).netloc}" + create_directory_if_not_exists(openai_plugin_client_dir) + if not os.path.exists(f"{openai_plugin_client_dir}/ai-plugin.json"): + try: + response = requests.get(f"{url}/.well-known/ai-plugin.json") + if response.status_code == 200: + manifest = response.json() + if manifest["schema_version"] != "v1": + logger.warning( + "Unsupported manifest version: " + f"{manifest['schem_version']} for {url}" + ) + continue + if manifest["api"]["type"] != "openapi": + logger.warning( + f"Unsupported API type: {manifest['api']['type']} for {url}" + ) + continue + write_dict_to_json_file( + manifest, f"{openai_plugin_client_dir}/ai-plugin.json" + ) + else: + logger.warning( + f"Failed to fetch manifest for {url}: {response.status_code}" + ) + except requests.exceptions.RequestException as e: + logger.warning(f"Error while requesting manifest from {url}: {e}") + else: + logger.info(f"Manifest for {url} already exists") + manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json")) + if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"): + openapi_spec = openapi_python_client._get_document( + url=manifest["api"]["url"], path=None, timeout=5 + ) + write_dict_to_json_file( + openapi_spec, f"{openai_plugin_client_dir}/openapi.json" + ) + else: + logger.info(f"OpenAPI spec for {url} already exists") + openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json")) + manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec} + return manifests + + +def create_directory_if_not_exists(directory_path: str) -> bool: + """ + Create a directory if it does not exist. + Args: + directory_path (str): Path to the directory. + Returns: + bool: True if the directory was created, else False. 
+ """ + if not os.path.exists(directory_path): + try: + os.makedirs(directory_path) + logger.debug(f"Created directory: {directory_path}") + return True + except OSError as e: + logger.warning(f"Error creating directory {directory_path}: {e}") + return False + else: + logger.info(f"Directory {directory_path} already exists") + return True + + +def initialize_openai_plugins(manifests_specs: dict, config: Config) -> dict: + """ + Initialize OpenAI plugins. + Args: + manifests_specs (dict): per url dictionary of manifest and spec. + config (Config): Config instance including plugins config + debug (bool, optional): Enable debug logging. Defaults to False. + Returns: + dict: per url dictionary of manifest, spec and client. + """ + openai_plugins_dir = f"{config.plugins_dir}/openai" + if create_directory_if_not_exists(openai_plugins_dir): + for url, manifest_spec in manifests_specs.items(): + openai_plugin_client_dir = f"{openai_plugins_dir}/{urlparse(url).hostname}" + _meta_option = (openapi_python_client.MetaType.SETUP,) + _config = OpenAPIConfig( + **{ + "project_name_override": "client", + "package_name_override": "client", + } + ) + prev_cwd = Path.cwd() + os.chdir(openai_plugin_client_dir) + + if not os.path.exists("client"): + client_results = openapi_python_client.create_new_client( + url=manifest_spec["manifest"]["api"]["url"], + path=None, + meta=_meta_option, + config=_config, + ) + if client_results: + logger.warning( + f"Error creating OpenAPI client: {client_results[0].header} \n" + f" details: {client_results[0].detail}" + ) + continue + spec = importlib.util.spec_from_file_location( + "client", "client/client/client.py" + ) + module = importlib.util.module_from_spec(spec) + + try: + spec.loader.exec_module(module) + finally: + os.chdir(prev_cwd) + + client = module.Client(base_url=url) + manifest_spec["client"] = client + return manifests_specs + + +def instantiate_openai_plugin_clients(manifests_specs_clients: dict) -> dict: + """ + Instantiates 
BaseOpenAIPlugin instances for each OpenAI plugin. + Args: + manifests_specs_clients (dict): per url dictionary of manifest, spec and client. + config (Config): Config instance including plugins config + debug (bool, optional): Enable debug logging. Defaults to False. + Returns: + plugins (dict): per url dictionary of BaseOpenAIPlugin instances. + + """ + plugins = {} + for url, manifest_spec_client in manifests_specs_clients.items(): + plugins[url] = BaseOpenAIPlugin(manifest_spec_client) + return plugins + + +def scan_plugins(config: Config) -> List[AutoGPTPluginTemplate]: + """Scan the plugins directory for plugins and loads them. + + Args: + config (Config): Config instance including plugins config + debug (bool, optional): Enable debug logging. Defaults to False. + + Returns: + List[Tuple[str, Path]]: List of plugins. + """ + loaded_plugins = [] + # Generic plugins + plugins_path = Path(config.plugins_dir) + + plugins_config = config.plugins_config + # Directory-based plugins + for plugin_path in [f for f in Path(config.plugins_dir).iterdir() if f.is_dir()]: + # Avoid going into __pycache__ or other hidden directories + if plugin_path.name.startswith("__"): + continue + + plugin_module_name = plugin_path.name + qualified_module_name = ".".join(plugin_path.parts) + + try: + plugin = importlib.import_module(qualified_module_name) + except ImportError as e: + logger.error( + f"Failed to load {qualified_module_name} from {plugin_path}: {e}" + ) + continue + + if not plugins_config.is_enabled(plugin_module_name): + logger.warning( + f"Plugin folder {plugin_module_name} found but not configured. " + "If this is a legitimate plugin, please add it to plugins_config.yaml " + f"(key: {plugin_module_name})." 
+ ) + continue + + for _, class_obj in inspect.getmembers(plugin): + if ( + hasattr(class_obj, "_abc_impl") + and AutoGPTPluginTemplate in class_obj.__bases__ + ): + loaded_plugins.append(class_obj()) + + # Zip-based plugins + for plugin in plugins_path.glob("*.zip"): + if moduleList := inspect_zip_for_modules(str(plugin)): + for module in moduleList: + plugin = Path(plugin) + module = Path(module) + logger.debug(f"Zipped Plugin: {plugin}, Module: {module}") + zipped_package = zipimporter(str(plugin)) + try: + zipped_module = zipped_package.load_module(str(module.parent)) + except ZipImportError as e: + logger.error(f"Failed to load {module.parent} from {plugin}: {e}") + continue + + for key in dir(zipped_module): + if key.startswith("__"): + continue + + a_module = getattr(zipped_module, key) + if not inspect.isclass(a_module): + continue + + if ( + issubclass(a_module, AutoGPTPluginTemplate) + and a_module.__name__ != "AutoGPTPluginTemplate" + ): + plugin_name = a_module.__name__ + plugin_configured = plugins_config.get(plugin_name) is not None + plugin_enabled = plugins_config.is_enabled(plugin_name) + + if plugin_configured and plugin_enabled: + logger.debug( + f"Loading plugin {plugin_name}. " + "Enabled in plugins_config.yaml." + ) + loaded_plugins.append(a_module()) + elif plugin_configured and not plugin_enabled: + logger.debug( + f"Not loading plugin {plugin_name}. " + "Disabled in plugins_config.yaml." + ) + elif not plugin_configured: + logger.warning( + f"Not loading plugin {plugin_name}. " + f"No entry for '{plugin_name}' in plugins_config.yaml. " + "Note: Zipped plugins should use the class name " + f"({plugin_name}) as the key." + ) + else: + if ( + module_name := getattr(a_module, "__name__", str(a_module)) + ) != "AutoGPTPluginTemplate": + logger.debug( + f"Skipping '{module_name}' because it doesn't subclass " + "AutoGPTPluginTemplate." 
+ ) + + # OpenAI plugins + if config.plugins_openai: + manifests_specs = fetch_openai_plugins_manifest_and_spec(config) + if manifests_specs.keys(): + manifests_specs_clients = initialize_openai_plugins(manifests_specs, config) + for url, openai_plugin_meta in manifests_specs_clients.items(): + if not plugins_config.is_enabled(url): + plugin_name = openai_plugin_meta["manifest"]["name_for_model"] + logger.warning( + f"OpenAI Plugin {plugin_name} found but not configured" + ) + continue + + plugin = BaseOpenAIPlugin(openai_plugin_meta) + loaded_plugins.append(plugin) + + if loaded_plugins: + logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------") + for plugin in loaded_plugins: + logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}") + return loaded_plugins diff --git a/autogpts/autogpt/autogpt/plugins/plugin_config.py b/autogpts/autogpt/autogpt/plugins/plugin_config.py new file mode 100644 index 000000000000..bdf77d832fd8 --- /dev/null +++ b/autogpts/autogpt/autogpt/plugins/plugin_config.py @@ -0,0 +1,11 @@ +from typing import Any + +from pydantic import BaseModel + + +class PluginConfig(BaseModel): + """Class for holding configuration of a single plugin""" + + name: str + enabled: bool = False + config: dict[str, Any] = None diff --git a/autogpts/autogpt/autogpt/plugins/plugins_config.py b/autogpts/autogpt/autogpt/plugins/plugins_config.py new file mode 100644 index 000000000000..ad96d4a378d4 --- /dev/null +++ b/autogpts/autogpt/autogpt/plugins/plugins_config.py @@ -0,0 +1,118 @@ +from __future__ import annotations + +import logging +from pathlib import Path +from typing import Union + +import yaml +from pydantic import BaseModel + +from autogpt.plugins.plugin_config import PluginConfig + +logger = logging.getLogger(__name__) + + +class PluginsConfig(BaseModel): + """Class for holding configuration of all plugins""" + + plugins: dict[str, PluginConfig] + + def __repr__(self): + return f"PluginsConfig({self.plugins})" + + 
def get(self, name: str) -> Union[PluginConfig, None]: + return self.plugins.get(name) + + def is_enabled(self, name) -> bool: + plugin_config = self.plugins.get(name) + return plugin_config is not None and plugin_config.enabled + + @classmethod + def load_config( + cls, + plugins_config_file: Path, + plugins_denylist: list[str], + plugins_allowlist: list[str], + ) -> "PluginsConfig": + empty_config = cls(plugins={}) + + try: + config_data = cls.deserialize_config_file( + plugins_config_file, + plugins_denylist, + plugins_allowlist, + ) + if type(config_data) is not dict: + logger.error( + f"Expected plugins config to be a dict, got {type(config_data)}." + " Continuing without plugins." + ) + return empty_config + return cls(plugins=config_data) + + except BaseException as e: + logger.error( + f"Plugin config is invalid. Continuing without plugins. Error: {e}" + ) + return empty_config + + @classmethod + def deserialize_config_file( + cls, + plugins_config_file: Path, + plugins_denylist: list[str], + plugins_allowlist: list[str], + ) -> dict[str, PluginConfig]: + if not plugins_config_file.is_file(): + logger.warning("plugins_config.yaml does not exist, creating base config.") + cls.create_empty_plugins_config( + plugins_config_file, + plugins_denylist, + plugins_allowlist, + ) + + with open(plugins_config_file, "r") as f: + plugins_config = yaml.load(f, Loader=yaml.SafeLoader) + + plugins = {} + for name, plugin in plugins_config.items(): + if type(plugin) is dict: + plugins[name] = PluginConfig( + name=name, + enabled=plugin.get("enabled", False), + config=plugin.get("config", {}), + ) + elif isinstance(plugin, PluginConfig): + plugins[name] = plugin + else: + raise ValueError(f"Invalid plugin config data type: {type(plugin)}") + return plugins + + @staticmethod + def create_empty_plugins_config( + plugins_config_file: Path, + plugins_denylist: list[str], + plugins_allowlist: list[str], + ): + """ + Create an empty plugins_config.yaml file. 
+ Fill it with values from old env variables. + """ + base_config = {} + + logger.debug(f"Legacy plugin denylist: {plugins_denylist}") + logger.debug(f"Legacy plugin allowlist: {plugins_allowlist}") + + # Backwards-compatibility shim + for plugin_name in plugins_denylist: + base_config[plugin_name] = {"enabled": False, "config": {}} + + for plugin_name in plugins_allowlist: + base_config[plugin_name] = {"enabled": True, "config": {}} + + logger.debug(f"Constructed base plugins config: {base_config}") + + logger.debug(f"Creating plugin config file {plugins_config_file}") + with open(plugins_config_file, "w+") as f: + f.write(yaml.dump(base_config)) + return base_config diff --git a/autogpts/autogpt/autogpt/processing/__init__.py b/autogpts/autogpt/autogpt/processing/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/autogpt/processing/html.py b/autogpts/autogpt/autogpt/processing/html.py new file mode 100644 index 000000000000..73c65b9c996f --- /dev/null +++ b/autogpts/autogpt/autogpt/processing/html.py @@ -0,0 +1,33 @@ +"""HTML processing functions""" +from __future__ import annotations + +from bs4 import BeautifulSoup +from requests.compat import urljoin + + +def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]: + """Extract hyperlinks from a BeautifulSoup object + + Args: + soup (BeautifulSoup): The BeautifulSoup object + base_url (str): The base URL + + Returns: + List[Tuple[str, str]]: The extracted hyperlinks + """ + return [ + (link.text, urljoin(base_url, link["href"])) + for link in soup.find_all("a", href=True) + ] + + +def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]: + """Format hyperlinks to be displayed to the user + + Args: + hyperlinks (List[Tuple[str, str]]): The hyperlinks to format + + Returns: + List[str]: The formatted hyperlinks + """ + return [f"{link_text.strip()} ({link_url})" for link_text, link_url in hyperlinks] diff --git 
a/autogpts/autogpt/autogpt/processing/text.py b/autogpts/autogpt/autogpt/processing/text.py new file mode 100644 index 000000000000..4cebbabd648b --- /dev/null +++ b/autogpts/autogpt/autogpt/processing/text.py @@ -0,0 +1,318 @@ +"""Text processing functions""" + +import logging +import math +from typing import Iterator, Optional, TypeVar + +import spacy + +from autogpt.config import Config +from autogpt.core.prompting import ChatPrompt +from autogpt.core.resource.model_providers import ( + ChatMessage, + ChatModelProvider, + ModelTokenizer, +) +from autogpt.core.utils.json_utils import extract_list_from_json + +logger = logging.getLogger(__name__) + +T = TypeVar("T") + + +def batch( + sequence: list[T], max_batch_length: int, overlap: int = 0 +) -> Iterator[list[T]]: + """ + Batch data from iterable into slices of length N. The last batch may be shorter. + + Example: `batched('ABCDEFGHIJ', 3)` --> `ABC DEF GHI J` + """ + if max_batch_length < 1: + raise ValueError("n must be at least one") + for i in range(0, len(sequence), max_batch_length - overlap): + yield sequence[i : i + max_batch_length] + + +def chunk_content( + content: str, + max_chunk_length: int, + tokenizer: ModelTokenizer, + with_overlap: bool = True, +) -> Iterator[tuple[str, int]]: + """Split content into chunks of approximately equal token length.""" + + MAX_OVERLAP = 200 # limit overlap to save tokens + + tokenized_text = tokenizer.encode(content) + total_length = len(tokenized_text) + n_chunks = math.ceil(total_length / max_chunk_length) + + chunk_length = math.ceil(total_length / n_chunks) + overlap = min(max_chunk_length - chunk_length, MAX_OVERLAP) if with_overlap else 0 + + for token_batch in batch(tokenized_text, chunk_length + overlap, overlap): + yield tokenizer.decode(token_batch), len(token_batch) + + +async def summarize_text( + text: str, + llm_provider: ChatModelProvider, + config: Config, + question: Optional[str] = None, + instruction: Optional[str] = None, +) -> tuple[str, 
list[tuple[str, str]]]: + if question: + if instruction: + raise ValueError( + "Parameters 'question' and 'instructions' cannot both be set" + ) + + instruction = ( + f'From the text, answer the question: "{question}". ' + "If the answer is not in the text, indicate this clearly " + "and concisely state why the text is not suitable to answer the question." + ) + elif not instruction: + instruction = ( + "Summarize or describe the text clearly and concisely, " + "whichever seems more appropriate." + ) + + return await _process_text( # type: ignore + text=text, + instruction=instruction, + llm_provider=llm_provider, + config=config, + ) + + +async def extract_information( + source_text: str, + topics_of_interest: list[str], + llm_provider: ChatModelProvider, + config: Config, +) -> list[str]: + fmt_topics_list = "\n".join(f"* {topic}." for topic in topics_of_interest) + instruction = ( + "Extract relevant pieces of information about the following topics:\n" + f"{fmt_topics_list}\n" + "Reword pieces of information if needed to make them self-explanatory. " + "Be concise.\n\n" + "Respond with an `Array` in JSON format AND NOTHING ELSE. " + 'If the text contains no relevant information, return "[]".' + ) + return await _process_text( # type: ignore + text=source_text, + instruction=instruction, + output_type=list[str], + llm_provider=llm_provider, + config=config, + ) + + +async def _process_text( + text: str, + instruction: str, + llm_provider: ChatModelProvider, + config: Config, + output_type: type[str | list[str]] = str, +) -> tuple[str, list[tuple[str, str]]] | list[str]: + """Process text using the OpenAI API for summarization or information extraction + + Params: + text (str): The text to process. + instruction (str): Additional instruction for processing. + llm_provider: LLM provider to use. + config (Config): The global application config. + output_type: `str` for summaries or `list[str]` for piece-wise info extraction. 
+ + Returns: + For summarization: tuple[str, None | list[(summary, chunk)]] + For piece-wise information extraction: list[str] + """ + if not text.strip(): + raise ValueError("No content") + + model = config.fast_llm + + text_tlength = llm_provider.count_tokens(text, model) + logger.debug(f"Text length: {text_tlength} tokens") + + max_result_tokens = 500 + max_chunk_length = llm_provider.get_token_limit(model) - max_result_tokens - 50 + logger.debug(f"Max chunk length: {max_chunk_length} tokens") + + if text_tlength < max_chunk_length: + prompt = ChatPrompt( + messages=[ + ChatMessage.system( + "The user is going to give you a text enclosed in triple quotes. " + f"{instruction}" + ), + ChatMessage.user(f'"""{text}"""'), + ] + ) + + logger.debug(f"PROCESSING:\n{prompt}") + + response = await llm_provider.create_chat_completion( + model_prompt=prompt.messages, + model_name=model, + temperature=0.5, + max_output_tokens=max_result_tokens, + completion_parser=lambda s: ( + extract_list_from_json(s.content) if output_type is not str else None + ), + ) + + if output_type == list[str]: + logger.debug(f"Raw LLM response: {repr(response.response.content)}") + fmt_result_bullet_list = "\n".join(f"* {r}" for r in response.parsed_result) + logger.debug( + f"\n{'-'*11} EXTRACTION RESULT {'-'*12}\n" + f"{fmt_result_bullet_list}\n" + f"{'-'*42}\n" + ) + return response.parsed_result + else: + summary = response.response.content + logger.debug(f"\n{'-'*16} SUMMARY {'-'*17}\n{summary}\n{'-'*42}\n") + return summary.strip(), [(summary, text)] + else: + chunks = list( + split_text( + text, + config=config, + max_chunk_length=max_chunk_length, + tokenizer=llm_provider.get_tokenizer(model), + ) + ) + + processed_results = [] + for i, (chunk, _) in enumerate(chunks): + logger.info(f"Processing chunk {i + 1} / {len(chunks)}") + chunk_result = await _process_text( + text=chunk, + instruction=instruction, + output_type=output_type, + llm_provider=llm_provider, + config=config, + ) + 
processed_results.extend( + chunk_result if output_type == list[str] else [chunk_result] + ) + + if output_type == list[str]: + return processed_results + else: + summary, _ = await _process_text( + "\n\n".join([result[0] for result in processed_results]), + instruction=( + "The text consists of multiple partial summaries. " + "Combine these partial summaries into one." + ), + llm_provider=llm_provider, + config=config, + ) + return summary.strip(), [ + (processed_results[i], chunks[i][0]) for i in range(0, len(chunks)) + ] + + +def split_text( + text: str, + config: Config, + max_chunk_length: int, + tokenizer: ModelTokenizer, + with_overlap: bool = True, +) -> Iterator[tuple[str, int]]: + """ + Split text into chunks of sentences, with each chunk not exceeding the max length. + + Args: + text (str): The text to split. + config (Config): Config object containing the Spacy model setting. + max_chunk_length (int, optional): The maximum length of a chunk. + tokenizer (ModelTokenizer): Tokenizer to use for determining chunk length. + with_overlap (bool, optional): Whether to allow overlap between chunks. 
+ + Yields: + str: The next chunk of text + + Raises: + ValueError: when a sentence is longer than the maximum length + """ + text_length = len(tokenizer.encode(text)) + + if text_length < max_chunk_length: + yield text, text_length + return + + n_chunks = math.ceil(text_length / max_chunk_length) + target_chunk_length = math.ceil(text_length / n_chunks) + + nlp: spacy.language.Language = spacy.load(config.browse_spacy_language_model) + nlp.add_pipe("sentencizer") + doc = nlp(text) + sentences = [sentence.text.strip() for sentence in doc.sents] + + current_chunk: list[str] = [] + current_chunk_length = 0 + last_sentence = None + last_sentence_length = 0 + + i = 0 + while i < len(sentences): + sentence = sentences[i] + sentence_length = len(tokenizer.encode(sentence)) + expected_chunk_length = current_chunk_length + 1 + sentence_length + + if ( + expected_chunk_length < max_chunk_length + # try to create chunks of approximately equal size + and expected_chunk_length - (sentence_length / 2) < target_chunk_length + ): + current_chunk.append(sentence) + current_chunk_length = expected_chunk_length + + elif sentence_length < max_chunk_length: + if last_sentence: + yield " ".join(current_chunk), current_chunk_length + current_chunk = [] + current_chunk_length = 0 + + if with_overlap: + overlap_max_length = max_chunk_length - sentence_length - 1 + if last_sentence_length < overlap_max_length: + current_chunk += [last_sentence] + current_chunk_length += last_sentence_length + 1 + elif overlap_max_length > 5: + # add as much from the end of the last sentence as fits + current_chunk += [ + list( + chunk_content( + content=last_sentence, + max_chunk_length=overlap_max_length, + tokenizer=tokenizer, + ) + ).pop()[0], + ] + current_chunk_length += overlap_max_length + 1 + + current_chunk += [sentence] + current_chunk_length += sentence_length + + else: # sentence longer than maximum length -> chop up and try again + sentences[i : i + 1] = [ + chunk + for chunk, _ in 
chunk_content(sentence, target_chunk_length, tokenizer) + ] + continue + + i += 1 + last_sentence = sentence + last_sentence_length = sentence_length + + if current_chunk: + yield " ".join(current_chunk), current_chunk_length diff --git a/autogpts/autogpt/autogpt/prompts/__init__.py b/autogpts/autogpt/autogpt/prompts/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/autogpt/prompts/prompt.py b/autogpts/autogpt/autogpt/prompts/prompt.py new file mode 100644 index 000000000000..c9076aa9eb39 --- /dev/null +++ b/autogpts/autogpt/autogpt/prompts/prompt.py @@ -0,0 +1,5 @@ +DEFAULT_TRIGGERING_PROMPT = ( + "Determine exactly one command to use next based on the given goals " + "and the progress you have made so far, " + "and respond using the JSON schema specified previously:" +) diff --git a/autogpts/autogpt/autogpt/prompts/utils.py b/autogpts/autogpt/autogpt/prompts/utils.py new file mode 100644 index 000000000000..f5ab9df9d928 --- /dev/null +++ b/autogpts/autogpt/autogpt/prompts/utils.py @@ -0,0 +1,11 @@ +from typing import Any + + +def format_numbered_list(items: list[Any], start_at: int = 1) -> str: + return "\n".join(f"{i}. {str(item)}" for i, item in enumerate(items, start_at)) + + +def indent(content: str, indentation: int | str = 4) -> str: + if type(indentation) is int: + indentation = " " * indentation + return indentation + content.replace("\n", f"\n{indentation}") # type: ignore diff --git a/autogpts/autogpt/autogpt/singleton.py b/autogpts/autogpt/autogpt/singleton.py new file mode 100644 index 000000000000..46c6256e0872 --- /dev/null +++ b/autogpts/autogpt/autogpt/singleton.py @@ -0,0 +1,16 @@ +"""The singleton metaclass for ensuring only one instance of a class.""" +import abc + + +class Singleton(abc.ABCMeta, type): + """ + Singleton metaclass for ensuring only one instance of a class. 
+ """ + + _instances = {} + + def __call__(cls, *args, **kwargs): + """Call method for the singleton metaclass.""" + if cls not in cls._instances: + cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) + return cls._instances[cls] diff --git a/autogpts/autogpt/autogpt/speech/__init__.py b/autogpts/autogpt/autogpt/speech/__init__.py new file mode 100644 index 000000000000..d5f0f2e0f3dd --- /dev/null +++ b/autogpts/autogpt/autogpt/speech/__init__.py @@ -0,0 +1,4 @@ +"""This module contains the speech recognition and speech synthesis functions.""" +from autogpt.speech.say import TextToSpeechProvider, TTSConfig + +__all__ = ["TextToSpeechProvider", "TTSConfig"] diff --git a/autogpts/autogpt/autogpt/speech/base.py b/autogpts/autogpt/autogpt/speech/base.py new file mode 100644 index 000000000000..fd9fda60fb63 --- /dev/null +++ b/autogpts/autogpt/autogpt/speech/base.py @@ -0,0 +1,54 @@ +"""Base class for all voice classes.""" +from __future__ import annotations + +import abc +import re +from threading import Lock + + +class VoiceBase: + """ + Base class for all voice classes. + """ + + def __init__(self, *args, **kwargs): + """ + Initialize the voice class. + """ + self._url = None + self._headers = None + self._api_key = None + self._voices = [] + self._mutex = Lock() + self._setup(*args, **kwargs) + + def say(self, text: str, voice_index: int = 0) -> bool: + """ + Say the given text. + + Args: + text (str): The text to say. + voice_index (int): The index of the voice to use. + """ + text = re.sub( + r"\b(?:https?://[-\w_.]+/?\w[-\w_.]*\.(?:[-\w_.]+/?\w[-\w_.]*\.)?[a-z]+(?:/[-\w_.%]+)*\b(?!\.))", # noqa: E501 + "", + text, + ) + with self._mutex: + return self._speech(text, voice_index) + + @abc.abstractmethod + def _setup(self, *args, **kwargs) -> None: + """ + Setup the voices, API key, etc. + """ + + @abc.abstractmethod + def _speech(self, text: str, voice_index: int = 0) -> bool: + """ + Play the given text. + + Args: + text (str): The text to play. 
+ """ diff --git a/autogpts/autogpt/autogpt/speech/eleven_labs.py b/autogpts/autogpt/autogpt/speech/eleven_labs.py new file mode 100644 index 000000000000..897f0fd7d20e --- /dev/null +++ b/autogpts/autogpt/autogpt/speech/eleven_labs.py @@ -0,0 +1,93 @@ +"""ElevenLabs speech module""" +from __future__ import annotations + +import logging +import os + +import requests +from playsound import playsound + +from autogpt.core.configuration import SystemConfiguration, UserConfigurable + +from .base import VoiceBase + +logger = logging.getLogger(__name__) + +PLACEHOLDERS = {"your-voice-id"} + + +class ElevenLabsConfig(SystemConfiguration): + api_key: str = UserConfigurable(from_env="ELEVENLABS_API_KEY") + voice_id: str = UserConfigurable(from_env="ELEVENLABS_VOICE_ID") + + +class ElevenLabsSpeech(VoiceBase): + """ElevenLabs speech class""" + + def _setup(self, config: ElevenLabsConfig) -> None: + """Set up the voices, API key, etc. + + Returns: + None: None + """ + + default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"] + voice_options = { + "Rachel": "21m00Tcm4TlvDq8ikWAM", + "Domi": "AZnzlk1XvdvUeBnXmlld", + "Bella": "EXAVITQu4vr4xnSDxMaL", + "Antoni": "ErXwobaYiN019PkySvjV", + "Elli": "MF3mGyEYCl7XYWbV9V6O", + "Josh": "TxGEqnHWrfWFTfGW9XjX", + "Arnold": "VR6AewLTigWG4xSOukaG", + "Adam": "pNInz6obpgDQGcFmaJgB", + "Sam": "yoZ06aMxZJJ28mfd3POQ", + } + self._headers = { + "Content-Type": "application/json", + "xi-api-key": config.api_key, + } + self._voices = default_voices.copy() + if config.voice_id in voice_options: + config.voice_id = voice_options[config.voice_id] + self._use_custom_voice(config.voice_id, 0) + + def _use_custom_voice(self, voice, voice_index) -> None: + """Use a custom voice if provided and not a placeholder + + Args: + voice (str): The voice ID + voice_index (int): The voice index + + Returns: + None: None + """ + # Placeholder values that should be treated as empty + if voice and voice not in PLACEHOLDERS: + self._voices[voice_index] = 
voice + + def _speech(self, text: str, voice_index: int = 0) -> bool: + """Speak text using elevenlabs.io's API + + Args: + text (str): The text to speak + voice_index (int, optional): The voice to use. Defaults to 0. + + Returns: + bool: True if the request was successful, False otherwise + """ + tts_url = ( + f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}" + ) + response = requests.post(tts_url, headers=self._headers, json={"text": text}) + + if response.status_code == 200: + with open("speech.mpeg", "wb") as f: + f.write(response.content) + playsound("speech.mpeg", True) + os.remove("speech.mpeg") + return True + else: + logger.warning("Request failed with status code:", response.status_code) + logger.info("Response content:", response.content) + return False diff --git a/autogpts/autogpt/autogpt/speech/gtts.py b/autogpts/autogpt/autogpt/speech/gtts.py new file mode 100644 index 000000000000..40f7bcb974ad --- /dev/null +++ b/autogpts/autogpt/autogpt/speech/gtts.py @@ -0,0 +1,24 @@ +""" GTTS Voice. """ +from __future__ import annotations + +import os + +import gtts +from playsound import playsound + +from autogpt.speech.base import VoiceBase + + +class GTTSVoice(VoiceBase): + """GTTS Voice.""" + + def _setup(self) -> None: + pass + + def _speech(self, text: str, _: int = 0) -> bool: + """Play the given text.""" + tts = gtts.gTTS(text) + tts.save("speech.mp3") + playsound("speech.mp3", True) + os.remove("speech.mp3") + return True diff --git a/autogpts/autogpt/autogpt/speech/macos_tts.py b/autogpts/autogpt/autogpt/speech/macos_tts.py new file mode 100644 index 000000000000..6a1dd99d5102 --- /dev/null +++ b/autogpts/autogpt/autogpt/speech/macos_tts.py @@ -0,0 +1,23 @@ +""" MacOS TTS Voice. 
""" +from __future__ import annotations + +import subprocess + +from autogpt.speech.base import VoiceBase + + +class MacOSTTS(VoiceBase): + """MacOS TTS Voice.""" + + def _setup(self) -> None: + pass + + def _speech(self, text: str, voice_index: int = 0) -> bool: + """Play the given text.""" + if voice_index == 0: + subprocess.run(["say", text], shell=False) + elif voice_index == 1: + subprocess.run(["say", "-v", "Ava (Premium)", text], shell=False) + else: + subprocess.run(["say", "-v", "Samantha", text], shell=False) + return True diff --git a/autogpts/autogpt/autogpt/speech/say.py b/autogpts/autogpt/autogpt/speech/say.py new file mode 100644 index 000000000000..04ab3a4bc88d --- /dev/null +++ b/autogpts/autogpt/autogpt/speech/say.py @@ -0,0 +1,79 @@ +""" Text to speech module """ +from __future__ import annotations + +import os +import threading +from threading import Semaphore +from typing import Literal, Optional + +from autogpt.core.configuration.schema import SystemConfiguration, UserConfigurable + +from .base import VoiceBase +from .eleven_labs import ElevenLabsConfig, ElevenLabsSpeech +from .gtts import GTTSVoice +from .macos_tts import MacOSTTS +from .stream_elements_speech import StreamElementsConfig, StreamElementsSpeech + +_QUEUE_SEMAPHORE = Semaphore( + 1 +) # The amount of sounds to queue before blocking the main thread + + +class TTSConfig(SystemConfiguration): + speak_mode: bool = False + elevenlabs: Optional[ElevenLabsConfig] = None + streamelements: Optional[StreamElementsConfig] = None + provider: Literal[ + "elevenlabs", "gtts", "macos", "streamelements" + ] = UserConfigurable( + default="gtts", + from_env=lambda: os.getenv("TEXT_TO_SPEECH_PROVIDER") + or ( + "macos" + if os.getenv("USE_MAC_OS_TTS") + else "elevenlabs" + if os.getenv("ELEVENLABS_API_KEY") + else "streamelements" + if os.getenv("USE_BRIAN_TTS") + else "gtts" + ), + ) # type: ignore + + +class TextToSpeechProvider: + def __init__(self, config: TTSConfig): + self._config = config + 
self._default_voice_engine, self._voice_engine = self._get_voice_engine(config) + + def say(self, text, voice_index: int = 0) -> None: + def _speak() -> None: + success = self._voice_engine.say(text, voice_index) + if not success: + self._default_voice_engine.say(text, voice_index) + _QUEUE_SEMAPHORE.release() + + if self._config.speak_mode: + _QUEUE_SEMAPHORE.acquire(True) + thread = threading.Thread(target=_speak) + thread.start() + + def __repr__(self): + return "{class_name}(provider={voice_engine_name})".format( + class_name=self.__class__.__name__, + voice_engine_name=self._voice_engine.__class__.__name__, + ) + + @staticmethod + def _get_voice_engine(config: TTSConfig) -> tuple[VoiceBase, VoiceBase]: + """Get the voice engine to use for the given configuration""" + tts_provider = config.provider + if tts_provider == "elevenlabs": + voice_engine = ElevenLabsSpeech(config.elevenlabs) + elif tts_provider == "macos": + voice_engine = MacOSTTS() + elif tts_provider == "streamelements": + voice_engine = StreamElementsSpeech(config.streamelements) + else: + voice_engine = GTTSVoice() + + return GTTSVoice(), voice_engine diff --git a/autogpts/autogpt/autogpt/speech/stream_elements_speech.py b/autogpts/autogpt/autogpt/speech/stream_elements_speech.py new file mode 100644 index 000000000000..e12b29b2dc52 --- /dev/null +++ b/autogpts/autogpt/autogpt/speech/stream_elements_speech.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +import logging +import os + +import requests +from playsound import playsound + +from autogpt.core.configuration import SystemConfiguration, UserConfigurable +from autogpt.speech.base import VoiceBase + +logger = logging.getLogger(__name__) + + +class StreamElementsConfig(SystemConfiguration): + voice: str = UserConfigurable(default="Brian", from_env="STREAMELEMENTS_VOICE") + + +class StreamElementsSpeech(VoiceBase): + """Streamelements speech module for autogpt""" + + def _setup(self, config: StreamElementsConfig) -> None: + """Setup 
the voices, API key, etc.""" + self.config = config + + def _speech(self, text: str, voice: str, _: int = 0) -> bool: + voice = self.config.voice + """Speak text using the streamelements API + + Args: + text (str): The text to speak + voice (str): The voice to use + + Returns: + bool: True if the request was successful, False otherwise + """ + tts_url = ( + f"https://api.streamelements.com/kappa/v2/speech?voice={voice}&text={text}" + ) + response = requests.get(tts_url) + + if response.status_code == 200: + with open("speech.mp3", "wb") as f: + f.write(response.content) + playsound("speech.mp3") + os.remove("speech.mp3") + return True + else: + logger.error( + "Request failed with status code: %s, response content: %s", + response.status_code, + response.content, + ) + return False diff --git a/autogpts/autogpt/autogpt/url_utils/__init__.py b/autogpts/autogpt/autogpt/url_utils/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/autogpt/url_utils/validators.py b/autogpts/autogpt/autogpt/url_utils/validators.py new file mode 100644 index 000000000000..5f8d2ffc838c --- /dev/null +++ b/autogpts/autogpt/autogpt/url_utils/validators.py @@ -0,0 +1,92 @@ +import functools +import re +from typing import Any, Callable, ParamSpec, TypeVar +from urllib.parse import urljoin, urlparse + +P = ParamSpec("P") +T = TypeVar("T") + + +def validate_url(func: Callable[P, T]) -> Callable[P, T]: + """ + The method decorator validate_url is used to validate urls for any command that + requires a url as an argument. + """ + + @functools.wraps(func) + def wrapper(url: str, *args, **kwargs) -> Any: + """Check if the URL is valid and not a local file accessor. 
+ + Args: + url (str): The URL to check + + Returns: + the result of the wrapped function + + Raises: + ValueError if the url fails any of the validation tests + """ + + # Most basic check if the URL is valid: + if not re.match(r"^https?://", url): + raise ValueError("Invalid URL format") + if not is_valid_url(url): + raise ValueError("Missing Scheme or Network location") + # Restrict access to local files + if check_local_file_access(url): + raise ValueError("Access to local files is restricted") + # Check URL length + if len(url) > 2000: + raise ValueError("URL is too long") + + return func(sanitize_url(url), *args, **kwargs) + + return wrapper + + +def is_valid_url(url: str) -> bool: + """Check if the URL is valid + + Args: + url (str): The URL to check + + Returns: + bool: True if the URL is valid, False otherwise + """ + try: + result = urlparse(url) + return all([result.scheme, result.netloc]) + except ValueError: + return False + + +def sanitize_url(url: str) -> str: + """Sanitize the URL + + Args: + url (str): The URL to sanitize + + Returns: + str: The sanitized URL + """ + parsed_url = urlparse(url) + reconstructed_url = f"{parsed_url.path}{parsed_url.params}?{parsed_url.query}" + return urljoin(url, reconstructed_url) + + +def check_local_file_access(url: str) -> bool: + """Check if the URL is a local file + + Args: + url (str): The URL to check + + Returns: + bool: True if the URL is a local file, False otherwise + """ + # List of local file prefixes + local_file_prefixes = [ + "file:///", + "file://localhost", + ] + + return any(url.startswith(prefix) for prefix in local_file_prefixes) diff --git a/autogpts/autogpt/autogpt/utils.py b/autogpts/autogpt/autogpt/utils.py new file mode 100644 index 000000000000..18a7a6389342 --- /dev/null +++ b/autogpts/autogpt/autogpt/utils.py @@ -0,0 +1,19 @@ +from pathlib import Path + +import yaml +from colorama import Fore + + +def validate_yaml_file(file: str | Path): + try: + with open(file, encoding="utf-8") as fp: 
+ yaml.load(fp.read(), Loader=yaml.SafeLoader) + except FileNotFoundError: + return (False, f"The file {Fore.CYAN}`{file}`{Fore.RESET} wasn't found") + except yaml.YAMLError as e: + return ( + False, + f"There was an issue while trying to read with your AI Settings file: {e}", + ) + + return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!") diff --git a/autogpts/autogpt/azure.yaml.template b/autogpts/autogpt/azure.yaml.template new file mode 100644 index 000000000000..d05a2c3f743d --- /dev/null +++ b/autogpts/autogpt/azure.yaml.template @@ -0,0 +1,7 @@ +azure_api_type: azure +azure_api_version: api-version-for-azure +azure_endpoint: your-azure-openai-endpoint +azure_model_map: + gpt-3.5-turbo-0125: gpt35-deployment-id-for-azure + gpt-4-turbo-preview: gpt4-deployment-id-for-azure + text-embedding-3-small: embedding-deployment-id-for-azure diff --git a/autogpts/autogpt/challenges_already_beaten.json b/autogpts/autogpt/challenges_already_beaten.json new file mode 100644 index 000000000000..7bdab6f24a36 --- /dev/null +++ b/autogpts/autogpt/challenges_already_beaten.json @@ -0,0 +1,3 @@ +{ + "TestWriteFile": true +} \ No newline at end of file diff --git a/autogpts/autogpt/codecov.yml b/autogpts/autogpt/codecov.yml new file mode 100644 index 000000000000..15a83ad26ead --- /dev/null +++ b/autogpts/autogpt/codecov.yml @@ -0,0 +1,18 @@ +coverage: + status: + project: + default: + target: auto + threshold: 1% + informational: true + patch: + default: + target: 80% + +## Please add this section once you've separated your coverage uploads for unit and integration tests +# +# flags: +# unit-tests: +# carryforward: true +# integration-tests: +# carryforward: true diff --git a/autogpts/autogpt/data/.keep b/autogpts/autogpt/data/.keep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/docker-compose.yml b/autogpts/autogpt/docker-compose.yml new file mode 100644 index 000000000000..281c8f697645 --- /dev/null +++ 
b/autogpts/autogpt/docker-compose.yml @@ -0,0 +1,49 @@ +# To boot the app run the following: +# docker compose run auto-gpt +# NOTE: Version 3.9 requires at least Docker Compose version 2 and Docker Engine version 20.10.13! + +version: "3.9" + +services: + auto-gpt: + build: ./ + env_file: + - .env + ports: + - "8000:8000" + volumes: + - ./:/app + - ./docker-compose.yml:/app/docker-compose.yml:ro + - ./Dockerfile:/app/Dockerfile:ro + profiles: ["exclude-from-up"] + + # Only for TESTING purposes. Run with: docker compose run --build --rm autogpt-test + autogpt-test: + build: ./ + env_file: + - .env + environment: + S3_ENDPOINT_URL: http://minio:9000 + AWS_ACCESS_KEY_ID: minio + AWS_SECRET_ACCESS_KEY: minio123 + entrypoint: ["poetry", "run"] + command: ["pytest", "-v"] + volumes: + - ./autogpt:/app/autogpt + - ./tests:/app/tests + depends_on: + - minio + profiles: ["exclude-from-up"] + minio: + image: minio/minio + environment: + MINIO_ACCESS_KEY: minio + MINIO_SECRET_KEY: minio123 + ports: + - 9000:9000 + volumes: + - minio-data:/data + command: server /data + profiles: ["exclude-from-up"] +volumes: + minio-data: diff --git a/autogpts/autogpt/hooks/post-checkout b/autogpts/autogpt/hooks/post-checkout new file mode 100644 index 000000000000..98de9285fdcd --- /dev/null +++ b/autogpts/autogpt/hooks/post-checkout @@ -0,0 +1,2 @@ +#!/bin/sh +git submodule update --init --remote --recursive diff --git a/autogpts/autogpt/hooks/post-rewrite b/autogpts/autogpt/hooks/post-rewrite new file mode 100644 index 000000000000..13304744ee8a --- /dev/null +++ b/autogpts/autogpt/hooks/post-rewrite @@ -0,0 +1,4 @@ +#!/bin/sh +case "$1" in + rebase) git submodule update --init --recursive ;; +esac diff --git a/autogpts/autogpt/plugin.png b/autogpts/autogpt/plugin.png new file mode 100644 index 0000000000000000000000000000000000000000..865ce3c922d7783efde0ebaa273519eca202d654 GIT binary patch literal 33356 zcmd42Ra9I-*DVTxU_pZh4M76|f;&xc3GVLh?vg+P!QGt%clXBK-QC?9X!LHr@0@?! 
zhx>TXxc8w)Lv`(vU8~lbbJnUq^0MM+D1<05FfeG65+aH)FtB~VrvwQBC@C1TrU$-| z?IkpvU|`U@Uq7(%^yoxDA)>RSj40wB{5x2V_Zd!f3jZk*Rd@d8;%si`2vb-#ECCdu zIs-)_CQe3<7WU2-cD69s$mA42Im&Cfu${e!qlKBdGfWA_Fb`0Q{-07u6NA^PADu01 zOkmh=F_3{@kY9gMvNLsdGjKG4(XoGdt^V#mjaxVwSsMUV9bxL*Ya)SSq}O6K14mm6 zTQit;qTL{%4Dr9uyEqz|z(8ICj{bKwVLKZe6I*APo8bg7P=@eYP1(ZP+61QHbdm-J z<|B-x$Twy8wBr>IZ{?Y%my0>$WaI_)Duee473GrtUylbPq~)`0aYeofzAb*z)Ou?H zhSmn#)IQbKWg)=VH%ZGYYJU7!h5d$7aGug%=uIFyeX=CW?QNeiW3?&8fH9QMN5u4$ zV{et0^~`IO|2Bd;%O>e3rVtXjGU+L!Kk&!IyaX2tc_5}MCGjudU*7i-z>k3>$l^d* zXGCBwP-YCX5efbj)lgw#wLY1Gb8Gkd-HuZ9>w-KyK z9t(1>r>P>&;CGX4B9w6v_emF7GB^A}u@n_9gtu=u4t69J6tbH`CrfZ~@iq?*B<1Dh zLXhA zohkE=A2vm4vNwOcYT}-hRv^~+_+kzkQx=m}5w;0ye|5BznnM+AM(BuL1O(y|;~Sd^ z7=$!DKJbub(&fkcY?1;UD!cJ(iQXyLS~eXm5XYqqTHtQ%of;}@RUeiXO^g?eC9=`v zIV#n9vJDD2V7MJ;+K{gFD-90JC2^I<#mIc_i+|6d2;)!wJ#v61P!;stEUSxQS&^Yj z`2C+_#6&ty&-A`0;@I;Y3=;+#jxt-bSf*{F^b0SzSORf5j; z{y^uKLhJdxHktLYq>3^5WOfL=1Y|G9|Ix0>qTT0XI?=!7G* zTql_VxTee%Wo8kh=c%NZnRL01b$nD>bAOIVyH2O>HS(viB-=D+xopv^n{_4B1Nh(9 zrNl?fDf0qG?g=|BIwK4Jc4-3}7(i1s9b|IDwW4US!nuF^pxkA3w`MORL-42v0p$I$ zRiTKVo0B3E3)4Od3mE8@A|G}wK<4f}-GtNL`B>G_&GKRjZudKsbntY|WVuB+G9)5k_kJbgyF zbhV$6&%QK~f&6*#FIE%vE>?$fmo*3PSsX49cYQ8;zZcoY5%tg)w{2Fdgc596#?Y*O zwdolszkLj=szKnFn)_W#NkO z(WXxURWP%Gn=7=h{|_=BX3{`!troXuk3XYhQedtVfAU4&&ZD8BoY0Vyor?^sjzFz{zP7B1{ zHqPbyug1dYVf6Yl=cWaniP^S2ZTV{H+VTkog)>ZNjj17pG+zTB_(CSlovhW zGYKi8znI12hB%`;|(2y6WriOfTT;XK)blOwv%y~z8A{jc+lOQsPexyZ}+)X*6Id^{;Go~G0R zIH8S0($zvb02@4?9mH=}!sbekWfjpT@^z^=}$z7tKe8K#iwp zzPAg1Qfvu0uPEC5a8?s^GXBhJW|Y(Mjp zs&`K*A->;0#7Q?Zqw@Jie6z4^=i#hFdG=)Z@$XZ5{&UTWLmuhv)!<_$mX<@MlfsRw zfG_0Cdv80O&Gr$#S1xJ&x>kw9#?WbyI)95W^=?$`I@`pUPBhNT-2LV0)&uy!s)K!7 zyRPxQ-%P`a$WWAXYa-@S!KW8`2-ca|?g{KS^xsbp{&YYB6TYx1ExA#)W6Ou(Y@e)W zY9(B1eCxTB1g}R|-0ZXI(=JQ@e%eVR$uRx(OnUwN41^-mD<=!0{R@M-1 zm6@!?%{=AmyuF_99<`$Esv<6-pT$7hXFlm zUW-ZIlb+@ut9JiwRP1`mi|n1D0FM(5m3-rOcNO$^&i(@>(;h}9aW)d|;9VT(i-xQt zZt>LCA9!B}Ih@Dy&we_W9BVo^|N6#s*YO=@)+ob;f--Z@vUhq5PmCP{8E72I|AJzt 
zA?HSiIeW?TGl|P#b{@CoWRBy_ARoAr1gzUPVJ|zT>@Tv zOM^q|l^`SGva6M5$n zP}S-yKDn?C)o<@JE!15pIm1XC696JjEMallc7gR8w;aigCjY3_{rn*Z3qEf6JM2I20a zcpRQ8`exp-dhXL;UtBD40Gm=nyub90l(y9~aCoad4BxT_wJd}D%CaZg)q*kkgPH#z z&nhaFuQAT5+U>b)=iFbo!s3Ssw+q_)hz}?hUY0E$%T$}Ez9RA>o{O+BEN&UQz5UtO zB%COdb#NfzHQ{#)FFSJ50+d8|ww!^!Y)U7B|H!5?Nt2!ZH0}!#sz1aD;zyG;8o`I!jq?L{dezPs^gUXjZ&GvKPQPV zp{-AxBO)Q$Aex9n*HhtDVTgI9*2h-$(@gNMn-N>lBgnk|;A7~)*I{BMbPl@;Ck-{L zxGpYSPRaV#U!etE6#~u-LCbehqwh!ypVV+x-)Wdchc}=QT+e@Ud2E*a$if))BX+mz zP)laJX&zd#!sWk0lXVnq;0%ekcaC$2I70XpRdFg(k!-zw-1bsIJ{QUH)SGjbl?8Ku zVO8%;y)&rVF; z=S}HUM;wb5t6GocTrwpi3VEDhk5g#@pEARW9KrI+@5a1DLBGbu6ljOJ4QfZqjM(UC zafyn>n`dbUKa5(p)NUWA{-TvaVg$b&-z0b4)Dwa}@xhB;a8kv&H7K6W;M zk3?`pH~ie%G!Cx8WKb(=x8!!te2(**j-q?mv`=eUS$9zEQw%Pv&0`X=ikv7t_Za2c zV|;EqT0w=!bGw!JkEK6j+!AmP?*g{tHQKP(FZ}ChLxje3$)Z<>SVl@OW-dlBhy)ex z{BU8RVBWR$R3##vvGq~jZvNY^!T~=v1FRogjr;=Z!HXl;4HJRgvYYkk3B_x;8boR_ zOM4E~tGNLzaEViGarVwNKd!4mx#E!uUy`6Cp#ps0ZW6}S(ZcRVXs>bkDV3PC5p$3Z zu#lJNTz~4pxAA=3^WKrEnqFa{yM;@%6I4DReNJxpL8NLVRi4&(@{cpALGl{yM^5$9 zg2(af055nomtdk1@!oy@ZUrsGKX`rgu5!h@ZMKSmzRj=tD%4;8(G0@IV*`?`?-h?8aV4qJ_V&jC+@i*q^o-N|I8TY71XeFs$bBa?+^*3>p8K0~ zB>YKM5j8YIM;MPHH;tJe5t?c&$F|zVVU}eoRJP{Cp>uaCUJQM5Z397#v+tO8o!O-< z3`l;*Bjh$eU6JR!Xbjwy?|z-UH{J^SQKFdnIAN-YERHBj+1eID`f`g}c_FR-!O4G` zqA?CZVy!&h@)~!rl&}{!et5uro_Gk9^H*J9!O__x{ zTCq=}BZ&&EUK0EsaIL-{G5YqZIG`UJ$>D;*G%?3|t+GmuOp?b1x;5Fs!*TWMHcyQ$ zta0;u2$M$pY!a)2rZ+1h&TXkLw~AOVvO)6?r__U%muEMb2KWBH-J|NACGBSo%f6`C z2+&!bZ}lX*!~6=@1fWS+TXq)l@??FzJOC<9s9j=F^#hCGS`5rJ=!};5f)E+0L{|@6$ZwOyyp% zB3ua?UC*}Sy#vpX&A5Nq-6e14_#|3sYf}3Gd^N&pewAvL%%jiZluHvZAfY?z#&uw( z@or{ewZ2)78fy*AE@eRNbplFVs=MkMhV6DH?C7P${-;N4}m4+$!new4C@ypT^<3jr&$=RgI zaP0DwEvnNZ;XvCp1MelNvGq|1WCZ$d?BC&CpFdXE$t|%FY}i~sm&;*YT_1z&qCCBp zHTJy$j*Q?^1-0_>Amnf*h+d7c8DQXI_V!49ktC(ggbOsMQ&A6QQF0iJ@q12f*EwAc zsU+0VWLJCw_-<2&$DTE6=*u@p8pWfY=$gJXYIeG%Q*$<{E4C?EXz^s`>OZG!-(!R${Hr;zepy28lTxioS;}JM zjL-fHFDEDWJ@1l7yU4zGY6@FeQZoKQ?tgTSmSmkkGq14_t8mc~4HdXjjymDDRWa+= 
zm)E3Qx`9_RdwX+AYFFhw9{@7X+K7x~<=p4r>Qs?sJ=&kfXVB74G!^8JCBR zAEUuV^!tc1<%JG02VcotL*D8UO4q6S%kz!RXQx`a4BqKYHSUtdTihj3kApl3d~V*N z-&~nae#vlPc_z5{*B_qsqdhr|HGNVcL8CgXxkIQF&+k%SD>uNTA zbMw=iCpe3py�Qu_k*VOPd=Te@4YaGyOisrS>;Q>73+o{g2Q!_A}O_Cc-jEj|d#X zZ$b-Q*;o-G)wj-xe_hageEWf2-n6Ije&XhtS zoOhcGqqhjeb3?yeK~`61vxJ3-87#Y}6xz&-f3G_QF%g!FHRagu7^bSDwLzZ8=Vf&rY;qCMq#8R%-t}mMqK5u4EJR4#Zo_Iw za`8|b>|B_(I7cWHeb>1Z*JuW&Cs8-49zJ2U3ho z9%!GLm8GAmUn389nX)41ui_ogizb~&u92?!*XxZU_BhA2>OmagG~S6&<)onVho6e! zI6}d_c{=i#A+h>36@}ka9WhkGXI@Uo7SZh$L!F(_E>*djP~2Q!(Y z2mfITO=C2d?`LJLO9e~_70={MwAU7^RYKd`Vi0HE4fNthktT$4SmA|*KtV@VcewIr zaap6~9k%FKey(nc1(!mvx-q2du(__O18LU+r$>9J2HK`}mZZd-z2p04R&O7D?Fi*& zBY(CMCzTvc411hP z|E0L$qS;5uiVF6i44sFx7Fi7ESH(HfATIV?neM-ahTy76MP#vwoFp9Vf4z!jI*mVb zl8~%ddRIi>=O{3LM(OF{{7lD2efJVMi>piiC`HsW=rC>nqEjUtV@AGmaDr zsk6qbow>dJWnRce7(U~2GWsT_C{>M!mdYkorBr(F6TwcCFtF0KdFw16t!7&^!@s6L z$Sv|^qi-(K2=S-ri=@@quZLggc8@sORU>CYA@N0LrWC+=hjQL0BesN8L|PPcHe86v z1JN7xj5$891U??r%PZu4C;V!Sfg-};BbSZNx3(*v`VRSWsgQ7;vT>vp>DTe8_%DGr zro}|(7vPQGO3P}>>>AqUk6AOA2qt<*d|8p$FUGi7Co`C#pJDnJ3LYQhue<-v27tnMubLw!cPc@3_BsA02oHe{k@-*BgnZ9OH zXsM=TmOp1uK!02lscH13af`qUf3wBYpV6Ap{^qP;RWm4Rp^7Vnq{k-I}rs@;XEzPF&aIN!~jtLEdJxtEGlmEU4kYM-Gi45rf>=c z+|1*hH6=^L5Ew`#)T-`^yxcq&9-scKK$VM)MvFecm5y>RUdz{dhWwi~*T6@Xcr_)X zsh24pSR2o>MYJLoS6i9$pdUKx*Wd7XzZ=fy@seeyN!Wa0iE_zIJLige z(v?gRhN(fo9K|LTkTw=uW63^p(FV$ap8X#m`PzvSHn5@(ohE)-;Na}c{(HgY>|+M? 
zUgW{d)_*OXM3iIM4j80X?{8hfYa*zRp+PKD&h+5cWy*0dSFDwkY#auf>`z5yNB0OSbe75NeZQf6fQ(8|7s)R94UfWz!*DhG%xkB zUHRuvVHjF^29vJekhGTrahDVI4`@s;Gmz%34@FYO1*tZcB_45{LjEk*4T%$L#-Ox!;9>CF`1caTUE++ zHkt?zs1CD)g$lxQBLeuG1BK-xmyXJq-)RC zW;IDRC7yJ2yV8WtoKC4cW<;Zl5yLjkXFPrMrb;840s~gV?et|V(vF8SD#NdF1shH~ zmsBpEPf-Sz)i!{ceBg2BI%3rM1b8lQEojk|>&c>ykBuFZ-L>%Vg}{^@7c>Mo-BnGO zw*S1Q7xU4y(N2BZjnx4Rm~&gv{JGE~tLb zMYOSDz|;}MIU>Yx>NgrCO*!00ZV!@Vadd1FY`FHji>(1Z$E2O1gu*R7U#N+ql9Gqv z?swn|Rzhw}@TioEjcu5po&p~8J>Ktn)uD*9`l2pi1UW!7IuDk}uN~X!W8ro?RV}TO zuD3N#=PAqp^EvT^PEN82bq=4|=;?tV;x5%yzVeAmnyP~d45`+;)h%Tm@}0M9{|cs_ z@1`Q%t!nq3c|P&F=C~YjaF__h`2v>RqM7)wr}`*pieRf}D4R8+_$ple^v@iDTCN8Q z8k#ULuG>qK)D7+h?$?R!4kzCs-Z{}ybR9e*BO`xL=1}p`RaVw6-@#=_?a*B& zgt^BiyxEaleOn$EmE9&r=^;ZMiW|3bUng#j`ioGuI$59AG#laSEA>&LRJK74%p&i!s9h$v@1 z=NAisR6|s(Orq_Iwrl(6FLcUQ#y7PEz^O71nz}U7jt;mCVG*nO>c?yZ+0gb~<#*tn z86G$Mv0iA2ek9@~&a3`{Kin~2?pL!2Pqxd>NS zx?I^wt2jsfR+$}iHrpDs45~t3xaD~3guQ=M&OpWOp%dRKdy*TL&A#FU>F%c{n+fXE8mw#rCy={dNWPEA80Y6LPwB-t^-~X)?GYk9BEn*?zm_Fup+%M31y^1r zi>O6ibon}5i)+>`pNMh)<+w!yIUMoP_SR`%HX3}EX7Sca4rDj5u!x8Fe)iH$u;FH# z`CvQjagy83+dHq89R5X&X3GZ>LJFAbJhnbLq-hH%=q)HixW#MX!`D#@fq+RpRyR7w zRje!9S?PeMj6`lp4+2F3aYZmi`PX4>cs^I<1Y}H21-y%dOG8b?2V|8@t4{S}mPe%L zOv`eHNOt^9l7CH9cq__mz>@rx5LLU?wP(cy01Fe=>}9FL^V7sS>F|(CSD$yPK$y+h zn6U#Th1ffjhTL#=RX)Epdo7&s*z~!{nC~m1$ux9Xl#F;e4}jWrO-h-1rbF`4&!0=Q zge_f1U(;3!SlJ70W{ejFe=~X`JTC<3t~5^-KO`Evcoa;cbKMkSMJTEO2`w99wdUXW zpCJ0Ct#%%D!4GYNKN;aXN>i?iH)wCM@qxkdZO3{|+KvxsqacwRCQN-Gc5mTeLf;<^ zQ(Ie?V*&}`i(2@5VE{dvUue%D5q-N>UMQhsmjXsBvvOSNgWNx6ykx1S1pj{VYL>NJ z4SNa(n+$woh0{Y>D@JasbRE^hAUo=+al5S*SVR3>& z1K>xtZ@Qo-_g?QME>WXr1*0P}o;tpSt4JXEM-#JAIO0EiClzlUx8<97Lk~Cz!5rkf+rmb;-naqn z#BqdgafaM~t@=E~#-{sdF}3*2$;uLgWEb0N2uJWaxgs+7y;v-}`w79O>gohz;B&rQ zvG7kHzhZ`q2}=&hv^{Q01JwsQ)$kn-HbwJf(lxhC-jRzaF&|{O^N71_94Aj%ue2Zs zOz5-f=Fc#DF2i53<;zuXfCpSIoBq3-QO(EA*|HSIX$i;8VTM2neT0II?6QRf`ITQv z4O>jmi#?8hE~TizJw8b;CHC^#fpv1B85K`i)5Zz`Dz#`?~mt+N@80B 
z_I^C4(TfAKjBKsj)8nQ3$hEenB;dS!AuKX7^EfXCnOvj`2_4jWGlo(p=F!nJC;fDyuy+i8|rxh|y2yH$70 z9~Q2`_lbwB?d6B?*QU$6;pCE+F~51ga~MR{$%Q%}BJlG_IjmFC>2hmo?V4}bpvqb0 z%R21jP5#BKmfgnQ-n%5LVmt5^N1;XG;o}(J!}rz_fD9=Ab5h2Ksk2@Ei+%)57rd_y ztV-{%W9v->jBKYw#>2bPRL<V5ekFR9v>kL*7SaXB*r>p|NfhktDpX z%2pXcq`tu(a${X{bBXMFUtw_Qoan@KF;@TF0RY$eS|@y-bc}Rf*WJ-(v8jALknGbr zTqqvn_LIt4wZ|3S&QpiASeOa2yRUnQ9Jndn`xdh79l}5d5 zxbT?@j10*Qe5u^nmpdw+^wCEVr{ZaG*2oI8TUS&n1nN~EGV*K#_&Qq~Ry$u^(qh#w zh=h_{H^(!f@n@y*;lH`7Ge6Pr1~D7;jaQq=xun>EpAyoJ(=-|UmS)cYYyDCQXP7MD z!=s^*PfJHv=A1#F$TY=G7gDy;RMgekDRgmjQ!*=gz-l&XKc)ZiK^c$C@4B*!3-FHS zYGvC+_zz1k9vBs^FP*}3;E3V}x2+G&JgG!bNo>zrUHnp@zSrEY6fY!~N3E%@AT zWX@`Tx)B@uETh~18yvUeHj+uNqUX(N&nmwccbtJ%>TDvbxA4 z+y!k&C*YonyjydVlePOt^R(*A!iYc-XW%s{K+`>#D+k#z>FFLp0Oz>a6qP=b)ge4? z;euYu}LY6|qhm|vSI05H42KzN9UrY7MOnNP{hTRfp)9%sPZfq3xM zdprI5cxGu=v(Lq9j_dncwVEB^u?^-TY=9YX8Y>)$>gwv&=DTQd;o;!D2SR~a!GepQ zL%>!;lv1-35KSj0Nd)3#-9;#K`((46_R6eRHaBk^jA+IiL!o$13@4{t>%I1mbK+#I zPZuB9+iPv78FOb$7kboo4US^d5RR-Zq8O^)mMl2 zAq;rhd+(&)%3JSM>U04xxHIFn@f3H_J2w%L@oF`)_T^j`!1bnaIh1Iy{hL({<=F$2 zpUni|geKK0>|t3ardPa741hpD((kJ-B~txu}0zs&KGr z?nh8$e?Pj2m}^C&4{?`RK|PJndGrC}UTxHC+VF&Ri7qr^Y6A`dZ5JTwuy@bR0530p z6e<4i<7ey7KHF;3sMWq)8avnj$wk%-JPgeFb^R(P{U1v@4#qRMPPzyyyOZfF>X%am z5{H1D3btnIiIHKh>cN$*Ab-#anBR4qg7={oH2JCG?4p@J8hRzbV-ziQ0 zm4^=4cO)&$yyWK?@$cOrln^o49v0o#Y-*7$$oy6{5;F4Ja0E1Y`KZ2(72f?NCPM#V ziXh-S1(7VdY&14Av9AbsXl8G+1UL`K(cpI1C&Q*$L2U0ud)eKO?t%fjhi}i&;hnFx zLiD+Zm~F0H>>4!JFMSc_*?tA0swkJ2(|u6Y$des4mh}I=Vw3)J1st4h!1DWmk!#Cx zI^WTvT&By>yk>XjEFhtxQnFOR0X#p}V;eI%ngkGv7af{rOaB~?M+1W!o^shYXVu9? 
z>jT7&3dqCRpU6yWRz3k=mRCvZRRHnpcmu@QXkXf{4JHTCPgXH8X%S0}UYr*fvFW3k z4WA}CN=D8@|L*e#6B z7(A^Oetx)(aal-jmFs939liO1ne&YLf#ZGdgy8*;r@v(LkrU?n`(OJOx5f=}0WLzk zoY~v)WI=#dH^shaUlIWb99W#R5%c>5vj7F;$%(gi42NgOUk(2KOj?cLof zSDE*f6U<=*9ASVa1cQTzSLr@sXqaoH-eS(|^8ir;2hLGaVG$DxSv!X{rNyLDp;g&d zXQqs>jtKaVJt8`{_Z*OQQRQDUm7-?@bod)oYYd*ImoYGR~S64Ps zmowcde<`WIr~I`w8>&MXKZ1PGs|&;G`d#V0z>;;@Zeb3*=LO((`)l3+qrSYlA`*oL zV%9ae9{u>)8D&60z+w%Lg0j-&=9W~Z<#y#P929B1e;$oH1(ENQwo{=W7&rG?hpych;ZM1k-@_Ro(Vx}Fzm zk9S*<=vzdgfV+HzdxU1<>sBnd0xeF%#q~;+&s{w-Wzt;r8|;pxo3Wk_&}6_As4yra zDfrF$T(JW_!L+@dYuubG9Ri1Vdj_DlrCPn@d?Gdi{}Lq{o4i>28f;Q5^rV1_ima&p zB!c?5+-~#sF5#|mV^gCqR>K?gxBfPPqzIOn_* zItJS7kEU=wnhlRHR=tcgd78&l>w~4`!$*EUI-DORL)ZqK!ksDrPY@l+yk=N_awGI zb9!1**3PH{M={9)M!%=1C&vK@_D>9mi8K4`0A^t^AE zPGE%92LxJnBqb%KWG>$~fURlV9e#pH*OI5e$(g0^F63vL-lTE!ecgc-iy)R{@c$8@es^g%ZL+<$SM7CcDxAjOp4v=n;X^03)amq}1^as6E!qGcz90xLM}0}uhv-;u=Mw3oBQ z)`sZ=3mO3smU@-|$59^%*VnwMZZ*9zkD2zif`15F38>F}FGG8PYBT+Dk2EwS1lqT@ zW;in#|2P&3HXMvYbl;v3*GR3w^Vey1eT4+SF~~aTLv;=MqdHz8Ix)W^j(<$d`?lvx z`qV05FxD0J!y~PMz@eRaf9bi@|LDNy=Uo__65Mn~76NY6faX}Us##C^9f{m_tOM== z3DvnJ4(=mp4TAQD(52eaC^jQwBY|lsC&y4xQL!PzEdW3w%=Y7gsTa+zCzSmBB&Dk2 zLH!YEnstMJ^YR>ThCkG&*O#J8_tBe8-kfx)TT(LG%EKi}At|UukaCx-GLAlvIcjm6 zX?l-L-~xQ<=U#@<0_df1w#CrMi13(rTFb6g^nrIVHa=dou`q45^#OC5F}T(11`}^K z!{z0f(@Zv#8vGH%4-A8F1Od3S{oQ$X5&o*bYMJ`LtK7wGf*p$As#(r%D=ov$F-zPF zSVHnwi}#|U^_IU^H5s=0BGq`;hm&~`zjBr@O3TQY%tfRF`fB`nuzf1^$}Q;5zr5|S zXmwT;X+!sY1S2r3ODRE^KlEw^U5-vpM^EooH$8Pp;WiN_TE#Kg2+@Eu$^}3b_j>?? 
z-+#dR=Hc<7>o;1DR`I`>pl%Y6YRz417h&dHOx)EXD_Gvgs~S9=R=t{i%l(S8{$Ei`%&~r> zlMRe-P!zM5F1u^LVl@2uCU(HLzgaimHMqReeo3JCFL-4N#}s?zmQ{L;=mgT8Lbql*Bs{NcokhR)AuRa-HlJ26(bq| zCp*j9sK4K*LaAQwjFNK8=dgGaZtpeH;^xJ zH0wJ8xJcOd5w;V|?LpfX!sew{u5Jq2_nRW<;OFOp=1>5pvO7}UzE6-?zR^L^&;W5c zw=LZ3plE@m$tzr}fKb+XC@CrOBK%mTW@Ev^aM0?Z*#l-})A#GNC zuM2o3%!H`k5GdGMNO^0HTI8Do3|dHzGHMwCd=@S3|6R7x(K`at9dbUP0LP2!y9R;* zV!5#4KgA~ed$p%nUyM9TVB@s5;zAq}TovhhwRS5~@ISJ##a+an=DMWLdOtz)w|8}` zK1%~m8059he0g=LemgRUUMpSrABoHruDY#&cW#>V3@5_o}8=AMj8aDVj-dL28M>*+!U5OOE(H;EG<^@&`eBBaBy~=ste}R zd$5Fp5hfa)%oz3Bj!z96wt*XM`T6+1&#-s*_fAkpMxgE~qiTTTk_=2sYpcan9YbCf zOPO9XbqBDo!UG44b84m~R>O(n;uEpnZ@OdoIPKNhUtitk*VM!n&y_V6BLSSENM2hx zuvb|4X!ge`qf~&S7T2M(rqr=7Nct6}D**`^<>lb!(21;Nyoihp-N`rripvNB{)Z=h zcBRK$-uA5Y6`Z>QRyhhV;|WV0#KZ}6WS6WJ`WKC8}3=Eu*}=ZlK-l z`U}v+8q%o09HS$kk$P%v%)gfeE@6sb@03DzC;z=8mICcNrtGUDS^0^>B@}s>tAh8E zqN3VISDFE|tzBN8J9o5@RP_f4W{`WHoDBd|m;&$wxRKJ(Oq+wZUH$n`sv1RHZI?-1 z@bVr4;N~EQKlJYUJot-Bpqa5b_}sVT{I!kNHRLuNd37`4AH69gG{BirRK>3{pZFWwwL473!6QbZ8V{U^ zQb%OA18CZ|xJ5<{#@P}@@40Mb2VFD<b z?9Sm1^7E2@vd{nf7ZcOGZG7gwVD>(EZf4>X z!1e%Q6f!rb1+bQbCias~Ps^-{c5K{V-}ST^ynK+r%0_TA^-_OR*lexw5Y`J2k>&wY zC|&a#1Iz<@eKkfL5e1NqnSun9*6@`29sdFe_u31r{C&RGPkDkp=fax?-ukmx!P=Wp17u`2W@jOekhIGCj~&#0UM@bT2mNp3+w)Ob`BU7`lb!NCXf*4n_PJ2d{#K>_^P zhXGH&hI{K?z~*EW-wzzk&Fxn!e>j?3OsYDlOHk2q2ncd`T2ukx;kteA4+M{qgc8YL zTWO6>v|nhIBhBr!!5u}ns)gdAzyjqJ6v(HqvhV>GF`7B1Y88C7J4(T5i)l8NM$Tv( zb^iky4f$LC%*Eps(^t=70*Ltqu+ZqlTFHZGa3B$2mZI)P`(YW&fN} zzt5jrn*Q{^s9~3Qggh9P*Y&u)jD**wE`6t8z~xLAFiv25{BHQ>08qOU)g6Q+PPdFE z@Ci3P+`R1>KtLzC=?1Co0rLcMIR56F&3`F3JJSXJh)Sp#0v&2)tdl*xWL zK7e@gh>JP^N8Hrv=_G={K?cBDq%{|x2RU?VdwP5APZnz}>rdxjlT!FSuZ=HqdMt-} z^3aGRWOdanWY|i4ydYO@>+P_;kz{&DE=fQ&2hY|KvqExpx3=hvd`P+EFC!h4NTj{{ z+N16Ri1%f=-^G;NUPD}}mR-5qg(4%pS_%C6VEo~VeFLy`HX+sC+$yf!BxIlu0J}`x zf7WjPg^G%*$6(b$DPOf1iu#d_vU_)EtZjMr zs0LuWn`f=BDHW+se867m7(1I?;bPPsGvW zI%_3BLr5Kuh|K&hFOLa&y2A$ODPU$LE>o5&CKY8S+`M25Zh!4&;3iW3_f#psVVaI* z^x*4xE0`oLWNOuvHA11#;?mErE@r*?Czv`y75 
zr;#l;djh}Mu<`*LVezre_#noUA)wgF?$_I2@pb4WIgnL$)0HLK(aJKxWyK3U*HJ-$B!9>8hUo_y(BLv3t-GjRk7vT}le2dxk(Qx$*g8|2#wG!Z>!>v9vmQ;>Ovwd0$Z7NaPx8)@on7VmjHu%B8Xq8EKZ*Rd4BOTo?WG*2lCFSsPh!Pl%4zFv`!>li_^6rw|Y~hjCf0Yr9 zHtXajvOy{7fnh!=K|vV%;>0|)p2!jEGVScsd;m2#A1uE0XVhuN1aW7m6sxM;-xsfH z?NqcfAld+=k%obNX1ARVSSnyCDdrCA9|3;5wfb;MVSBzVwT3ae+6VaiV&JCUtLz16 z7LZmHF8(>qOh8g>di>J3hz?LTfQZI4l_faJrSi<|Tpk{J7yf$kB9`}R=ByDuK%{Sl z6_lQBwgXYGl8nUGkH{}sW z+40H08Vh?3%*7*-BxCNIpMmIDz=q1}eB2GdOpOP~UcHtGA~`vE8o$>~T>=&F1J!(vosgaWh5P+6$LYi4eP;lUA&V*43?(we-b(WW zSzhv1asG>%<8ePL1!`^P(EVN#AXADrLPA1~R>NG3dZQ*&zqR%r9eKhK5}y| z|MT-Rr4OX%1n>W9>@B0}h?;gm5+GQD1-D?q-JRe8f^%>Q5Zv7%fk1E)NN^|E!QI^w z-0k4*65Q|Redn7sv+i1V<_8Nnr+fGA+O?~zo_eZ!R7|aazh_DG7dok6xmC9y4|h-A zrm+0GkHl^BNUI>nRIuhE@Fp#ZH{E&5@6*Cu=@*)(LXeQSUF91;uk!+D<>GmX~Y1O{wZv;Yg5QPj{554Z8ZkwyFXI9g!) zHfSKxztLZBbI%eK8hc3f@L*{(S4IY;kS&8Qc=mpB$0xz#()a7v;PR8!zdZ3N^~@xZ zJ=i`jB2W2`(LdmEqpCIIVn+>71!TC_M^Ut83dGg$QPqFYN$Z^lJj59TMn(7*@S>!G zq^c}oaT#t0bzTQajbR{i%))MI@k{uC>gjn6f}2P{`VklN6Nx>nk4Adq7ct*opHfms zYYT{DD6d{UrEW?^9^)k(0*E`yL5S2u%xeK+DqC1}c9$|`^?MLLA_xfy9iN?b4G)t< z#IsH>`R477gJAyT1gY@R-+PPHQ0mvk$^dj(*pYOOBCnw#XLMvZ%8!xTTI+amg}diNJ!QRk0K!{$mw*`3~nQxg-) zJ~u8hni7}@VwhYkDG&7l0fONC^XJc>DMOSYlO&Ww|7;&7vUNKCPEJqn zi4UNFBRjf7VLRheRa`Yta2mPZK!`5nX{7!CIbV_l5e(CgjwJ(riIquosr;Y5&hC63 z9Ze`m_F1^m-4b?#5t8MeE@E@-q$UH|3uyazynaJJK2ysYHE>oHRWiiiK6@n5%IbBj z42O!Qc(Rc@%$5k<79|fud0qN!`Ge`(bpM7QV`JEPt&D(G*`rvOL<*}B_@K>cI_IJq z8!Id1cBYq_Mvk1Cf}Z|MVtc;oJSe9*DEIL*-BU<=d(;y7S6%AUCqwa$4mbig)0cbG z_px9&ud^_6YU-b3V=-?0CFIEQc$85ch*H3(;-A${s*aB_PMd86W5~&cakDpRF3d%c6TLmA=m z_|@FpHVX|w17}>s(}{~Yxiaa}>6%K>23UHYrcB-4c{`#X$>Ej`=EYXavw3=7@WnWM z2XZrF^Wd<(VMajwX0fA`o6Bbd&t=`8wDo5N0Xv#BN2V0{#mkV@R^3`I9VP1pxVyW% ztGn??m%T*^`Q+qS=P7IVg>$v5^L5!05-e+H^eJC3zq4K3_NiiBxC9|jRJ+4No@p_| z*4xAMPVgDuulOy87L7uOzN($GeZoxAVOH|bX~I<7vz!6jVu9CA)vMs1i~Si zno;C$ulWEmQ~o>IhI%Fp${eHn@Edj7+<&UE0sc&5Z+h2nKl)00>VgBLYU+udc36`bVkm&erT+ zNJ~9M)S=m>rEi|}atM;$!_MaQUE)EJF)Ao1=tF`>WMmK~$iW_e@BicAgFLJ25rmk> 
zMj%AWG!PK*eAPb+egI=3CdRnI^+?RpGE^ZidM=5`=l-6)lrGQO1JDDzW^2?u%}#}= znIb|Auu|U3QF95s3~YbRn$6qGL`ir#j=^L1ZttX@v5%gPZuyfHD%_&9y{aO$CB}Sg z0sYyYeT|#^zs0J;+rSuTEZ6M3ZDO!6H3A!tlvH*n} zFVQ>_@qMqLX!lP57~n(xI?=QzMP~5AoSdZbW*+fVG=b-7p%*hoZ!f`ZiKX>@Ef#(;8yT2j` z1s8h+9BT$|+FYD3mmNANoxR}H=>Ap~EBnnn$W{ShSH`@z&!0UYO>3UM(S>WV$2$$2{ z9g*t5O+k?fHzyxSj!fF?zI52uxh$)GMm|-nFJUR$xnyf83t;h<{-WEhfBio?eXOU+ zP)PZP%A2xMmY0>iv?QN4{mhSSlEsEvpL$tm8IYL6p4|$gdGqih@R>UXMj!J51gn|S z!QK49Mo6Y_8^(|~2R_Tool+uXm@B%hAHja$W z91?zsts68-vLesjh}Sh-Vi=-cl;$Wl?Mx&G_ zezu&siPS1(ks6upT=R;E?8>u#bBJ?RJMm>BhxF^zE)yBsC!rV$vJZVwqPm}s)tQ@5 zAwfk&O=@cS^$S-LgQ#fopfOdjs-~iH0m)j&y4eKCQDEKX)(eeyNuYCOaet-;Vto|& z0!@^gKM_2hM+J)c8n?XNj#hi$Tyl8+!=d@mB)#{2En-c!*G53z^waNIm$-_fqDefE zeCbXoU#mCYe3K;P4pu4RjO%HY4qJgrDarMr(0sUi<-Tx>boxTd@q_c~*YT#XQBg@L z$TLI;ggKcx8BQG+q9Rs(s7Z*)9kO3kbRiOb8aBCba~{V^I!DRsQj{uHM&QxCmuG%QwYBp~t*|e4W8%rq}F!z62d{55L zsTjQ2;AEny0ju%7tccy3-Q>8X;5u?gP(b^#>{&)T34hASCgD>q*|;(X=#=vug!jDR4v?L+V~&4b|O+`n0&;P|5dI8uDAE?;#g`xZFOv9PTU zsT0%Fx7fCq$P#}&`D=CgJEN8OV1xP8It#*D7C(RtjIh^*6-RU(1bAIv!-=eWCGRoI z)vm7TZdS-ef*wiC1Y~MhWJaCrJ6pu{#lb7~2Jk2nQWXh|U2kAfm|Nr*d z{y&_$PG)j)OgIQwj=^zN?UuoGFZF&Ljq~&K2Fv0nA9H23>)>pMQb!;?`@cS9mB4Y` zsD;h27w{;N#q%;JSG81BM$^upJVzEMK*1DjKpm6@M4u;Dm6YS@Gy(Ns0-?I`rEj6q ztZ)~p1*`X~9IHd>&*XYFlrV>+XI$sKFWH_y!|A>YS9lCFCB~@ECwOv6;c!`{j8xuZ zRzCN!y*P8Z+%z+^PwQtjc2z;4qx>#zPMbRDZ>WR{Cy{V7^J*aFXYZW7Q?h;lknc)N zkI*OUg!Z$SaX3BR6}($WZY`j$ucQzRM)Mt-W`sMcX3rw>dRm0iNiC$B9xCis)u_?@#->rUq>(0HC}guxw~ zp9!l_Mg=%nX0DGYAgVO_{m%AD)o?#+2&VfpYe(Eku;fzs@#lUHRR#p$zc-S-b}9CbXZm zFrlJ!ZI5|zd)MY`q4Cns(Yg3Jh2H*yyFY44oH5kEicgkTVW)mP!{?#pN>u|Hybh^w zPwX!xB_%f7ghTR1Vz$^lc@@sH8aigWE#xk4PPG-+a1m)diSNky=_5s^7o^gqIYG;# zc}$?nE5=fg75}>?PkJU~(fFxWB+ZHGQ!Qyc;&6FKM@L+1mAYlRn@#5n>4&sLPABb} z6N#x8!$zBHN<7qbv%@;Y5{m5JXxYjU4-{7N<{Je6H6y3)W&z|}#1IIIi|fj-7tWRYwVNXww!%}HB#hloIA6n&PUR1(7Y&&9h{Q9j_lmorUcYi? 
z`I~+ie$Jw!3k$E`N)Na;GWC2B%7lu~__W^a8BmXUx+Wa5E>b@cTZY|t8-!#+i9IN4 z)ie=dlRD~p%1n=NDl#isQctVlpbjyu(!`H*eU+Q%z4!Rr8uCli`JV?jZ@Pd3jx>Jw zZU4V=zQ(y7CWK5*hmgV2q(HN!7054&G_s~q=ASKOhqesXQV;Y7fBM>g?-of7T20jQ z>IkT0gs`CE3qRchg%Jwg!}~JP)4=CCBo7KoUas8E{(~QtJQ1fB18eY%J9$yvI_P%k z^yXdU9QDc^o-K-q2;H7>lIr7%Z42cy_zKN?X6w>P2qnK^nkRe%xICYvd2-IrdBj7q zmb2~aLGoq6Bkg{v#Bvm)=qMDv-d9ki0|X(bqs zRz^mX$v%ir8Ay2*my){>K>3q)Qr;Qj1tS<+1S(aQ)x_0X6524*ciJH8NRD(b>Zo&R zLJ#UV#p<}W*&Lg07ro!heV~5W^00*spPZoY#KgpCRXhEvA>*VBZaiwy?NX`ZQigcd zHI^4yvt>6oR2Aea5$x?yNNEue6^W1fzMM>b?=pwqWGXvkAIUslFO=C^Lt6G{9nx|B66OIb}{K~9c= zwdI)d*|P^GpMS&a8yhEoBrsG|RCbhm0Nc#tUxC#6#-`IAb1mP*Us!K)9=@=Yp5DCn z-uRfaR+9%i@6w#Z2MC0ok$Iyx-OqSo&i}`Ehq#aqg^>x*RKo%Io1^1XOXm+0F%1G~ z1AokXM=foNq!chy-5Z{jp~!5x;fc+b`%B0>oVrsJ7c}tPU z{vpQDk3YCDnNd0$zn!8Tkla@G)+9h%7C1(Y;*YmOq#sfRiuH$zg#k(JsmKR%|4c37Jz=(@-0JzQvn6+avdrp!9qOa%r?AiqRzTPQxSwSAyE-5IZC z747Yw)x-A|dE9qSK0huiaXC4&I(HmKEci>$WY9zampvNZ+Im4mWo%M^e*qhw^|g)m z9^o^yc7#U%(Ci3=kvtsC7TIWQ;cqjE_g+Yq;~}>5r}jBD*-!bBD9MdYMT4m>72WFS zDXOb;1%7*(*;EvvB=XQD4aZ|Q^XCgZ(#h#5t=lIiwLC?WVGPmZ+1z36ht1@!qj2-3 zFZVvjM;M`@q0a^|7IXZy`xf8$uY=pac(|ehg(ys);~r00c}|9s+-KcCik{rAYPb0C zOJ^4+NJ>f`Z}{_^4%P}1`ky1f85lkYAfc0fIyoJNKVIHP3Js4o0e_!f?AC@Z;SbMy zo<8XuhR_up04utpND6FgIUaUT_U}(*&`AXT_}ADtS_wyz3dZG&jRLj(HJ448zr8I_JBrDXzdDL~bXpz}b31rQ>rMA) zT6qTzD_!kK)|b#)5Ux__ATU(N$K?VyH4T}`^5-u? 
z$II|=cb=n^CCNU*y|FPK8?3Cc@$vl=mb@H$E!wZHz0ZEPw4}K&dB(4fVM|95{f!W` zVP34N;!;&pJq8%007Ftp2(p)qbQT@z{Y^6S=AXqM6O)s%2?-Xl?rS~Kv&W+^Kt#>w zu}nTJbjoDte~$|qVCWO@*wF1w`vV@R41gGjIqlL{92S8`+XZOB14TtnY<5<<{lP*H1j7o{Mj5AtZ=XFn_Ank{5Y|#Vsmrn+%i{zX!z#M zVX7xg0Vo7O3Ob=YXHUFSdnTE=a9sAAhUwWNHN__n=NDvE~3LQYBf91dSo zvnP^t__DEOt5Qi6QOamef!Fbrxi$mJpy};>qHsiZ7oF}NUYhz#2eOf~uV?pOt|l~m z*Z96JLG%?1+tiY?x8Qs0xo0 zV@j6Bh^57AT|jK)|CQdm^mxIGjg8Ib!p>$sL?RtQ(C*J;|7=;^kIbIMKM+t#I~s$X z$H8>8OZ!QpVjbwdsAPlg(x83CH*`=r;v_^L3IazkSiMjMZ(g{fGv5kZERDj%zbBca z;8|BT%ynt%)>z!`T3KK3j}L2ug0j1dA;F#TJPeSCacW}Ge_1zEZP?s=V+#T_gp8Wp zU~cP0Dq&`Qn4yZw--`oSBG^PwXw0v~!4mx?Yg>>y-8dar5}p0YKc;FRpI=zGk$wKS zx!}HpnBjd%_L79}vr*?a?n1R_Vc*hi*t)O0yggrU%09`%*&iwo?^`zGui6p%diqxf zsqW8@hju5ElBXWX=Gjb9;j^><4h$^Uo2S2JW{&iK^cFrU@r(}XrzGI^s&Y#KU7$|J zV&1}exSp&gE$MY;i1>z}pc!kt>kQYi=F@<5!xvbR6mVFYye58ew%5Znhp~->B_-w{ zp7&bDqM1(TZ(m<-Qxkw0L`z861N4{t%qvCN_!0nCg^G$+Wotg9ZaJk6*&wvCwpPIj z8u`xF)a=Vf)U4HbXvnisw~L^a-r7=AGTJ~Y;*+qticNWYk(qzR#XUK>ztoyeoTp%N zmcWjHi0H^i@dcc}A>`oxnnm~DKB|j= z80l<(G<&wU3UIZD#>3NUX3Y+Y=di7;cK`JEPfsc2GT=zHPgY-xj+W?mKB0^8@$!?2 zP?{rHaI^?zmL}_y(h%D>%Yt&Begkg>`SD_1P4F1M@|$J>=6ziVO8m=t8Fe-vTG}iIUxEfE6?ddo& z3^8@&zJ;1-d-AhW87C;52AgVy5@Jb!J}0r3QDnm6|3Y6!kcvh%)~ki2{SO!5s3F`} z^wDOV!hEJI^XJbvr{gv;&|L@4;nHCmfr^fg0JI3}>+kzJ(D%EkDI#Xoc+qo<6sj;e z=LkvwkYByBR=HnWUH$WSXa!UlQqj-^&(8~89KnWv(yN7#;Y2nz3d+j%>Z+SL&sLiA z5^5)&9Zjc;ia3Z43!QTvm3qEm)BRqI)my2esTmH1^03Hv|1COWVF{zj2hRtPW15(p zR`JZu((Xk;LjyqS4uR0C{e{YJv8`z~|5|c#e{+~$kO0%ZnfZ;(?C$HyK+3hC$o;w+ zTt%gGenE-Zd|kOv?G=BT;a$`CT5g`XGDKg&LM!3owtT9!PhCO7?1e<|OKH$Z>3F20 zHB$5h%_zM{rojGAYS(7wtAAx!`uMT^|HyR>=~1V5;c_wCRmXzGbYr7PXY_35{pVN2 zJR|n+{K205bT9Mf$5lW3^T0p?A&>8#k1}t{0sDMVc&6H#H_dC)^5SrzGg8g23kFMW zZoE9+p-p!e?eDC5gC4l+?Bs-ujJ*6qW>Za5xBqa# z+hcIm?6&psRy56H6{RGiK4wJ|J+G!FYW+1WVD?y)Zy-CYA7c%vCoElHkP1mkNxh|~ zjR}p2SnmRU2fD=7k^f_VLb(Ycmjx#8k`rg)l3yNVdZU0ApWXYABr7|+uF2ir7Snfr z*s<&W?DfiuKIy-MnU>T>z(2C4HIz{`R;}!Igw@zaH0?6jr3eJU%;u#H>{#gO3(gM4 
zdujAdHwZz~j707|jJk&UA3uJy@78Ksm&lpFBoj(#4@6Sa)s+E) zPXchlE}=S9L`ZcFM4RUee@S*$fYm}N2e}pIthV-n4%VADZ)$w*aRuEEC%TS<-m-Ey zu1eh%XA_eCL#AE0&V7Q~kG6b0vnt<>Qvu)&iqzqd^4L3qt}IOw+1+XSR*W z!o<|xVUBp^Fo<-trlB^zn%k=)tE3daEOeI~9+^YeL5FPH%S_%jpR5QOWM_b! z2GkTm&-3x*D_U9%%=K$nHKRb5OEHD(M`#1Z%T3!@I4qeJaj_f_@stK5agM^!=Ex&p z@^YTk%&|7|M?0Ik;glf(BX~bEjtw^5^-^IIOy9L6M;l%#@ZIW-mM$zn0~}UN9t7)W zr<3D51CO1@7;Ed*0hZ#UfnKPDo24LGbpGBI`$3^dYUTO6d2saKOzh>=4#t)C59jPP z1;4MtPWKYL(GZ=(*oEnME9~_P6#-OM3ch1M|fai5)y$bQGM>Ena ziY#y_>eHd@Ahh{^02~oTD@z6gedSu=V3wEy@BnQXp;Q-A)*>KkOL^DI^XsR9{$SAr z*99c4eN?CFo<}#FyuAZ)<&J!n4tcP{hFO|WNNZO3zaYlJ!QJz;VW*dmxslpGpWO$H zFn~TyE=N(tkGfa@EUU~~MHRx{AHK_go1e>2c+$U^xW7@V`d7S-D}+NGt)bwC2D8hS z)f<0Of#D;KYw&Kp>MDzLkvoN7*)kl&W{m@lm9q5p9f!X$4GAW9;REws#;Ifz!QAf6 z`VRNfwrmd7+WHA5U{%D|6`1D2UXjj?!~Uum7?MpslL=)IURcnY;!YSf(4SMEnp#yQ zQ>gvw@9gMUg-810EU-=mF>?(}72#E7wa2$~jNxyHHT-ul&cxX!cW4>bWx>K{KM(1zzl8*={1FR+}~ap94am;(on12O5XCKPuUh)5ft zV9<n)}9Ve6M)7npIMPI6LT&Q;~okj!8jITpd@gkKY|0TYF#CBB_qcawQUB z;IY8pAg%<60Z0UaT{nC!LLc10S3BF6cn>;uBL0Jat-0=!5zz3zqG3FZA`UrmQqjU8 z`cnkA#DnYF)Ymm{I6kmsYXi01iqVPu4*WcJB=Wr>SXrK1%^yz=<; z@@SiZi`cOc6`y|dx9GzkMOoWPYURCoh7sXD(~5^_fBuwN`C zNr1S5Ex{zS(q!Zs>*nfgYNtHT{-iwRTudQ`h1Q--)ENp{T3WjO^Hncve*HAU6V(5d zOj9IPrpWmgxg#eSs9TzHbt&RtgsYbfF%0(^Pb$mi=oypq6|e7i2lH|jQ!9EgOeU~g zTZn~|d?THyOErPpsgeILj82NzW64b8l`kyoA{|g+=l~U_zIO~o1_Cj7i6OG}?Y5>W z!n4)vyT0z;yw1;$D*@ywh+x~MnEa3~Ip^V+n*pz*$jfR;+p2WZN;MpLU*-Tq39((s zrwjw&`tnpx__a|Ebd%S6vMGbP&`KC;I-u?A+dPG9h?x`wHBQw%nJvq3c1r*cmd%GG zyC|pen2#%~;D_}XM2!xXP3f{_iNVbJ^xoy%}O)Iq6kfv872|2H>Yb+xxx zRsivK`ll6_ti;SGUFT@or-OhcEryvgx?(rEGj6|nf{1o}a^l$F4=N>r54kHeEoXsI z#H!W8f*Ku!z>`{sOHv43{W8H7rJm>#mkA+xl3E@0^}_94l?9GShA@0Sw@Tatvo+bH zp$3_q0Xr#6R5YvJdsSn~6VYCHHp?+d&>Zv$a=y7d8s6u3-kvKK92P$PfHbXcfeY;m zY=8E(fm>v`HD{uL!F~h3j7tzHc6VM!5;QqvM(xOv*`Qag=mXt;Bs{2vy_C!z8Xr4w zyjiW2el&M>kM_cnuhQ4FtDW9{tf78pdk=gVE0`oXD@hT{Rf%26fMEaU$2g}B^}%p$ z{s~H8V#F-IHXS7hoYS8=b%~4IuSQEEO0z#;lx;RwnjbkhR&53E*~5M8?0NMRes8=u 
zy*b66)0~{Ut8t|w-CI@wNzAqWGdDVH@*GGyN4hv#8iv0V z9XZ_49lkzQ5;Zwl?N+?x&n{|8$&oi$jnlO=yE zM3x~g4dHRgzJ`C$757(y_eaH8n`~06QL+2<4rGIe-ZTFGJ>_SDbJ=v`zNr-2)eE1` z{Fc)Xo^}3|V`&tn+i~@G=+({DaeIU7T#b3U-wVSxhGxTw3pS2bG2g$x^YS7xFpvgb zZ~eubH6ae^puD_+@#W#df&RT>{}=dM$J4Urh5K6!at@9dP%wHt8OJ>D)IX?rs#9t- zOx7F{7G-ugU;pWM9`8+n5orU~#>$Yb-w-SY))!EcX~WF8m0vI?w3HS5?4DeL3qcI7 zgiub&@UtpvH2m4esRsY@V+O;%DF6OVL%9<|5AN3`H&p(&=Fiw$z4J~(*8&jHKEFU4 zlQTA^)KqF}=-$Yo$+OV-3BsWzL+?ne^747V+k^O@e0FnZh{-&@nZ?5m_IOW7H7N19 zGl7hgrkzb6r&~{t`M(nl?tN)5NkUxIv(*tWhx59cejX>tU-E33_pjxIg`GM+VHO2) zKw=q+&Eg$ySk|()HZ?`U`)d#O=!0iHLVXLG*Bz{Tb=SGwpPnDny?Hxex?T2nAn$dU ziHY6hMMhRu3;=|`K7)VZ;Q7g(k`nZ# zQC*-xTtI+Y)A^>Z&%^8%`uPs&Z1T0g*UyB6|KvSDTrqYcY2bZ+)YaR23KKl~1R5TG z4f?jSyDMjDNe>btU#38qh^WMNkehsbex3tJpFh`sn(2R=nbA6|TSJFhTU~E2J<@Q$ zt)87OYshT~ZH}XO2knzI!SC_*=5Cyy8d(}|i(R2;`ff;Pf{!Rx*3J0!52HA9GeqfR z!O(uIebS8ZlN^pP+>6D|sCDmsjvMR_#8nMjrLgeu0?_=8;N{n#pp#yt!>wTA-0yK0 zjBko`WgZ{k33%<8FhuV#hlTE67SPj25OM}@=Ua>^KFA&Yd5|CwUR|yQCzo zt4k6{C9E)+n4O*7=k9Os+1_-tO189Puf6{sc@%&r(C}H-`x3B7_yqOw>)+&F>+}*o8CFHVfzNxI9J@bqm>!o@J|sM%v*s8Ks|mu%Z(Uae z9+i14u(}b-%jW!7ot(v%7;mtNiREQw*-P&aj*^mH0YuK>b!O2MMJ(Tui69+G)@_P~ z^^#No)W7~Y`Ar=V5C9ssZo z$lAGSKiu&do5R9+Y_ian_T@q^XvDymnAG8Ioiv_FXxDO%S8SsAq-eHR`Jw0v4DMH+ z<4en))o$|9MDMZA&`uf$q zwf@5+cQrjd@lQ)4IJoJ#xeZD8x0lXqRhA&_7Yq zx(U#INJv=d&u~8#^PrD8NMrzdQ2WAQWLw+2r`s_bsEY)xl1977Ujx<^`1%<#I!fl8 zy6u`=aZ!=Yzxm!MQji`P2L#;NG9!t~g#u7Om?Ct|7}|Q*0Xob{7#O6*s&8fzgUGGp z`>6@H&d$bR4_c;}P@3lF_kh50;d8lu>cj8B&3O}PeApmbaggSIefULX-mRMs&#&g! 
ziFcH$#lIYx0SBRrms@dpZ8_PDwo2f|WR+V3H0<=D{zX>W!xNRD{Y6jJ&N#aV(M+Q` z#*QV(5~u@SSno`^Nwv$D_V$UXiSPcqI3Vr6{%H#no8$u+p}yX8f2sCObxG8=*l1m2 zW9(0trfyeRrQr-{L()hY8?a$;m3w1=iN>JD4YCv-+nL|C=cudodCgO9eVw5+diOW4L1(}_Q@Co4#41~-WGlgrMNdU5mV?P|_xpY-qO`|(mnk>D0B&kkN>gK z8N3F>5G{Jw2K~iYke^SzqeW@b8@;9!Ei9+*g7d@o#$ggt4idYSx3@>n_*}Nh0frvJ zMg;O%(NDl=9x5sUA6powIfE>pOMdtH-1$Q7Jm)Fd&E?+kDQi>PWuqIGAV*w_=Gj|z z`XB2*KMB2j0ac*+FiAtB&l!3s+yMG!AhrGIlpzqXA*fw+V@ykX0SJ_)Gsi}eWcDBh z4-mhUiZKqMS1+Dwa}F4p={K_d{U=tP%Zw+pFnRIGP|+=1`G@F=ir-GED_B8rX%!|@ zib`mzp-BTtME~}7RY0OO*h#BL2&w}E3YChT=bv#v&GvXFO!c_)={8O317Y3!T8dbF z|64>>9@P(dPK7-Y1kc(=+9miB96}PXs6wKn?bQ+kkuc1Tmw~2C{fJNQ7k#`IIoj!< zYC0Q}7xLOv=}|~%1Lc|3WZCx2{_@tEf=*Y+V7t6i;d430#KryfJLNqM@DD)$xmv6D z?*ZWUzr$2rk9VB*&DVJJ^nX7zcwuMvMr;B^FE%CRgTW&GbNE4axh3s;9wVd74-ID6 zmqRJC($X((*VYmp&L`1(iTpMkN|J>L-FbOIpP72Uu;M}i&;Y>R-4C|v%w}@UUdRwG z9-za$ELz-<6bqqIRiN?XhQyjdX(m4zl#bNr5NFMEN%mkNi1FdmNk3vjl7GR=N54>4 zpUhf?S|#eXHyE0}n;@^KteDGrzI3!8;WIycXZhd*{G`r({z~pA1QQo94?Y233a}=G zU5=&>np`HZbUtrfvV$0*IR4T>prjmXu<`q^WYS+rXhBje&1vOE z>U9b&FLl#!M7=G6x&qHFHSGW5+?|dvhUd&+4$6tCFbzLPv~9)Pk&}X%=eCcj+dNw1 z3(PzDod42H;PGu3teJH-g8@gp49?!rP@~vliRXQ0G1mv#^Souas3fRtF~<)BTfDa4 z46g|xG#rJbz0y`4dG|lGYIgl^r`{f5NY&c#qJ66`a`u3Nx&>92I=)@1+VxM##iO&% zn=?h>)ibi?Ug|3Ktep7OEo9X#X<6>pehDl*i)#lH_PmeGb z)PNB(5R)BzUhoM;&6ulsCeWaAFrUstnLk54N!nh*ZqWKhMOv)xH(zbb6it`P3sp3Q zY`RT$(DeBgY}42+8w$-<+P@LM9!6+s)A?Q;ysD zqS__KK)1@!LDlPY3n(Nh=dZ?Tv&r3=F63kLFm01Zt zr>UG%RF%LYUFxjO`owai%nBb3-`xc-;q6I8hDK@R#D3ZuTXi|qsOHbwkaXc>!2z!U z-rWt3H;;GL;(j{AxUIhut)#?GJ+)x0UOlynw?_J4=!ba2=T-7`uRu-5T^-2ZzjD3H z8$qS%Zw~Da$6nBbiCg4D0~(VBTm1ZhEBxoLTT=&1>hzHm#T}W=#`W*s~ zE80ucnl3en3glD_g4YN%3aZXVJx>Amj8`yp3E9$f5&bg4NDh zyA4hr3j`N-Z8&Ujzpql0_2*)OB894ZJ7W?Nh*eH zeQL`xASl+!q-*#csTElh#HBy&JQK5JhAXIWnu1z+zFt%qhhhg=khOjuYVsv6-0EN? 
zTRR4wHg~&=pb?D*j8`S9$b?Ot-R<_z31(jT+IsZgfIFsi*+KkRLIXGd*K9%^wkyR= z!@b@`48VtkmGL`_kx1hgeRA%Y4TrlWhlAP?Lbl9nY^psal!?4bcbBO5mndz9MpolB zidwdU{a<-b1V>k6se$dnC!>{C$un#K2Q#lS_H@&I$R6GDq3W>kyGVWwJ6ljM+Rdya zO1)6EZAsKpN6iK#(z028636kV*^u)Ywr3SmY`FTj*^2?c#mFIrZ?x^D0#dKC1T`V< zb79+=U`=IK2D06!T)f;14!@MXmS$QCYq1=p5sn%gNs$ng#nHNhPp(UfJmN9ZVt+d3o)aCVOt^PGc6ALbA;fm^GqvWN4|1!=HY)jD5%{*()0khB^wxEojT=TA(JgXAB`jvpn#D>~W=^&MI`MI*JxuVp2 ze*1Xx6$ze8@0iga-8i^#>#FJ`?GuC&T4v>B@U)S+N^=H0dlmJT+|F$J%=hZaZ@+JH z;3OYAOc}cbS3vKP=xXrHv$v$ zq#3O`1Bzel&Jh-S+p>6chFOWSbP+;3fu;qF&|5?$k52_|e;3=5J|J`?B($U@#_uuH zjoexk7^~(Na4+St6Vth*$3uf9zLk~mObXszBsnWIMnwgN#>r*a-F7nccA~-prk=YS zfIQ-4Wb^4jZ^cYEC=QvrAw?nd_uEvnAeeT@MR1GIqY0uRi*+ lMi+*@qeB(3HS2fd&&hUnqbl#H0d5&iR!UK_MEqmG{{rW%L>K@7 literal 0 HcmV?d00001 diff --git a/autogpts/autogpt/plugins/.keep b/autogpts/autogpt/plugins/.keep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/poetry.lock b/autogpts/autogpt/poetry.lock new file mode 100644 index 000000000000..21a6c382b09f --- /dev/null +++ b/autogpts/autogpt/poetry.lock @@ -0,0 +1,7266 @@ +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. + +[[package]] +name = "abstract-singleton" +version = "1.0.1" +description = "An abstract singleton class." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "abstract_singleton-1.0.1-py3-none-any.whl", hash = "sha256:1f5e2359a609360bc08d975f578cce75a752df06db561efb679e69646199ec1d"}, + {file = "abstract_singleton-1.0.1.tar.gz", hash = "sha256:d97d26ecbcb7422f78df1b0bca48a03df5ba04cf58844c6da033a7840beaae82"}, +] + +[[package]] +name = "agbenchmark" +version = "0.0.10" +description = "Benchmarking the performance of agents far and wide, regardless of how they are set up and how they work" +optional = true +python-versions = "^3.10" +files = [] +develop = false + +[package.dependencies] +agent-protocol-client = "^1.1.0" +click = "^8.1.3" +click-default-group = "^1.2.4" +colorama = "^0.4.6" +fastapi = "^0.109.1" +gitpython = "^3.1.32" +httpx = "^0.24.0" +matplotlib = "^3.7.2" +networkx = "^3.1" +openai = "^1.7.2" +pandas = "^2.0.3" +pexpect = "^4.8.0" +psutil = "^5.9.5" +pydantic = "^1.10.9" +pytest = "^7.3.2" +pytest-asyncio = "^0.21.1" +python-dotenv = "^1.0.0" +python-multipart = "^0.0.7" +pyvis = "^0.3.2" +requests = "^2.31.0" +selenium = "^4.11.2" +tabulate = "^0.9.0" +toml = "^0.10.2" +types-requests = "^2.31.0.1" +uvicorn = "^0.23.2" + +[package.source] +type = "directory" +url = "../../benchmark" + +[[package]] +name = "agent-protocol-client" +version = "1.1.0" +description = "Agent Communication Protocol Client" +optional = true +python-versions = ">=3.7,<4.0" +files = [ + {file = "agent_protocol_client-1.1.0-py3-none-any.whl", hash = "sha256:0e8c6c97244189666ed18e320410abddce8c9dfb75437da1e590bbef3b6268be"}, + {file = "agent_protocol_client-1.1.0.tar.gz", hash = "sha256:aa7e1042de1249477fdc29c2df08a44f2233dade9c02c1279e37c98e9d3a0d72"}, +] + +[package.dependencies] +aiohttp = ">=3.8.4,<4.0.0" +pydantic = ">=1.10.5,<2.0.0" +python-dateutil = ">=2.8.2,<3.0.0" +urllib3 = ">=1.25.3,<2.0.0" + +[[package]] +name = "aiohttp" +version = "3.9.3" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.8" 
+files = [ + {file = "aiohttp-3.9.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:939677b61f9d72a4fa2a042a5eee2a99a24001a67c13da113b2e30396567db54"}, + {file = "aiohttp-3.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1f5cd333fcf7590a18334c90f8c9147c837a6ec8a178e88d90a9b96ea03194cc"}, + {file = "aiohttp-3.9.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82e6aa28dd46374f72093eda8bcd142f7771ee1eb9d1e223ff0fa7177a96b4a5"}, + {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f56455b0c2c7cc3b0c584815264461d07b177f903a04481dfc33e08a89f0c26b"}, + {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bca77a198bb6e69795ef2f09a5f4c12758487f83f33d63acde5f0d4919815768"}, + {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e083c285857b78ee21a96ba1eb1b5339733c3563f72980728ca2b08b53826ca5"}, + {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab40e6251c3873d86ea9b30a1ac6d7478c09277b32e14745d0d3c6e76e3c7e29"}, + {file = "aiohttp-3.9.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df822ee7feaaeffb99c1a9e5e608800bd8eda6e5f18f5cfb0dc7eeb2eaa6bbec"}, + {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:acef0899fea7492145d2bbaaaec7b345c87753168589cc7faf0afec9afe9b747"}, + {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:cd73265a9e5ea618014802ab01babf1940cecb90c9762d8b9e7d2cc1e1969ec6"}, + {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a78ed8a53a1221393d9637c01870248a6f4ea5b214a59a92a36f18151739452c"}, + {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:6b0e029353361f1746bac2e4cc19b32f972ec03f0f943b390c4ab3371840aabf"}, + {file = "aiohttp-3.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:7cf5c9458e1e90e3c390c2639f1017a0379a99a94fdfad3a1fd966a2874bba52"}, + {file = "aiohttp-3.9.3-cp310-cp310-win32.whl", hash = "sha256:3e59c23c52765951b69ec45ddbbc9403a8761ee6f57253250c6e1536cacc758b"}, + {file = "aiohttp-3.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:055ce4f74b82551678291473f66dc9fb9048a50d8324278751926ff0ae7715e5"}, + {file = "aiohttp-3.9.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6b88f9386ff1ad91ace19d2a1c0225896e28815ee09fc6a8932fded8cda97c3d"}, + {file = "aiohttp-3.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c46956ed82961e31557b6857a5ca153c67e5476972e5f7190015018760938da2"}, + {file = "aiohttp-3.9.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:07b837ef0d2f252f96009e9b8435ec1fef68ef8b1461933253d318748ec1acdc"}, + {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad46e6f620574b3b4801c68255492e0159d1712271cc99d8bdf35f2043ec266"}, + {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ed3e046ea7b14938112ccd53d91c1539af3e6679b222f9469981e3dac7ba1ce"}, + {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:039df344b45ae0b34ac885ab5b53940b174530d4dd8a14ed8b0e2155b9dddccb"}, + {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7943c414d3a8d9235f5f15c22ace69787c140c80b718dcd57caaade95f7cd93b"}, + {file = "aiohttp-3.9.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84871a243359bb42c12728f04d181a389718710129b36b6aad0fc4655a7647d4"}, + {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5eafe2c065df5401ba06821b9a054d9cb2848867f3c59801b5d07a0be3a380ae"}, + {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:9d3c9b50f19704552f23b4eaea1fc082fdd82c63429a6506446cbd8737823da3"}, + {file = 
"aiohttp-3.9.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:f033d80bc6283092613882dfe40419c6a6a1527e04fc69350e87a9df02bbc283"}, + {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:2c895a656dd7e061b2fd6bb77d971cc38f2afc277229ce7dd3552de8313a483e"}, + {file = "aiohttp-3.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1f5a71d25cd8106eab05f8704cd9167b6e5187bcdf8f090a66c6d88b634802b4"}, + {file = "aiohttp-3.9.3-cp311-cp311-win32.whl", hash = "sha256:50fca156d718f8ced687a373f9e140c1bb765ca16e3d6f4fe116e3df7c05b2c5"}, + {file = "aiohttp-3.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:5fe9ce6c09668063b8447f85d43b8d1c4e5d3d7e92c63173e6180b2ac5d46dd8"}, + {file = "aiohttp-3.9.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:38a19bc3b686ad55804ae931012f78f7a534cce165d089a2059f658f6c91fa60"}, + {file = "aiohttp-3.9.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:770d015888c2a598b377bd2f663adfd947d78c0124cfe7b959e1ef39f5b13869"}, + {file = "aiohttp-3.9.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee43080e75fc92bf36219926c8e6de497f9b247301bbf88c5c7593d931426679"}, + {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52df73f14ed99cee84865b95a3d9e044f226320a87af208f068ecc33e0c35b96"}, + {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc9b311743a78043b26ffaeeb9715dc360335e5517832f5a8e339f8a43581e4d"}, + {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b955ed993491f1a5da7f92e98d5dad3c1e14dc175f74517c4e610b1f2456fb11"}, + {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:504b6981675ace64c28bf4a05a508af5cde526e36492c98916127f5a02354d53"}, + {file = "aiohttp-3.9.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:a6fe5571784af92b6bc2fda8d1925cccdf24642d49546d3144948a6a1ed58ca5"}, + {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ba39e9c8627edc56544c8628cc180d88605df3892beeb2b94c9bc857774848ca"}, + {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:e5e46b578c0e9db71d04c4b506a2121c0cb371dd89af17a0586ff6769d4c58c1"}, + {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:938a9653e1e0c592053f815f7028e41a3062e902095e5a7dc84617c87267ebd5"}, + {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:c3452ea726c76e92f3b9fae4b34a151981a9ec0a4847a627c43d71a15ac32aa6"}, + {file = "aiohttp-3.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ff30218887e62209942f91ac1be902cc80cddb86bf00fbc6783b7a43b2bea26f"}, + {file = "aiohttp-3.9.3-cp312-cp312-win32.whl", hash = "sha256:38f307b41e0bea3294a9a2a87833191e4bcf89bb0365e83a8be3a58b31fb7f38"}, + {file = "aiohttp-3.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:b791a3143681a520c0a17e26ae7465f1b6f99461a28019d1a2f425236e6eedb5"}, + {file = "aiohttp-3.9.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0ed621426d961df79aa3b963ac7af0d40392956ffa9be022024cd16297b30c8c"}, + {file = "aiohttp-3.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7f46acd6a194287b7e41e87957bfe2ad1ad88318d447caf5b090012f2c5bb528"}, + {file = "aiohttp-3.9.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:feeb18a801aacb098220e2c3eea59a512362eb408d4afd0c242044c33ad6d542"}, + {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f734e38fd8666f53da904c52a23ce517f1b07722118d750405af7e4123933511"}, + {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b40670ec7e2156d8e57f70aec34a7216407848dfe6c693ef131ddf6e76feb672"}, + {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:fdd215b7b7fd4a53994f238d0f46b7ba4ac4c0adb12452beee724ddd0743ae5d"}, + {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:017a21b0df49039c8f46ca0971b3a7fdc1f56741ab1240cb90ca408049766168"}, + {file = "aiohttp-3.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e99abf0bba688259a496f966211c49a514e65afa9b3073a1fcee08856e04425b"}, + {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:648056db9a9fa565d3fa851880f99f45e3f9a771dd3ff3bb0c048ea83fb28194"}, + {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8aacb477dc26797ee089721536a292a664846489c49d3ef9725f992449eda5a8"}, + {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:522a11c934ea660ff8953eda090dcd2154d367dec1ae3c540aff9f8a5c109ab4"}, + {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:5bce0dc147ca85caa5d33debc4f4d65e8e8b5c97c7f9f660f215fa74fc49a321"}, + {file = "aiohttp-3.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b4af9f25b49a7be47c0972139e59ec0e8285c371049df1a63b6ca81fdd216a2"}, + {file = "aiohttp-3.9.3-cp38-cp38-win32.whl", hash = "sha256:298abd678033b8571995650ccee753d9458dfa0377be4dba91e4491da3f2be63"}, + {file = "aiohttp-3.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:69361bfdca5468c0488d7017b9b1e5ce769d40b46a9f4a2eed26b78619e9396c"}, + {file = "aiohttp-3.9.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0fa43c32d1643f518491d9d3a730f85f5bbaedcbd7fbcae27435bb8b7a061b29"}, + {file = "aiohttp-3.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:835a55b7ca49468aaaac0b217092dfdff370e6c215c9224c52f30daaa735c1c1"}, + {file = "aiohttp-3.9.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:06a9b2c8837d9a94fae16c6223acc14b4dfdff216ab9b7202e07a9a09541168f"}, + {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:abf151955990d23f84205286938796c55ff11bbfb4ccfada8c9c83ae6b3c89a3"}, + {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59c26c95975f26e662ca78fdf543d4eeaef70e533a672b4113dd888bd2423caa"}, + {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f95511dd5d0e05fd9728bac4096319f80615aaef4acbecb35a990afebe953b0e"}, + {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:595f105710293e76b9dc09f52e0dd896bd064a79346234b521f6b968ffdd8e58"}, + {file = "aiohttp-3.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7c8b816c2b5af5c8a436df44ca08258fc1a13b449393a91484225fcb7545533"}, + {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f1088fa100bf46e7b398ffd9904f4808a0612e1d966b4aa43baa535d1b6341eb"}, + {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f59dfe57bb1ec82ac0698ebfcdb7bcd0e99c255bd637ff613760d5f33e7c81b3"}, + {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:361a1026c9dd4aba0109e4040e2aecf9884f5cfe1b1b1bd3d09419c205e2e53d"}, + {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:363afe77cfcbe3a36353d8ea133e904b108feea505aa4792dad6585a8192c55a"}, + {file = "aiohttp-3.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e2c45c208c62e955e8256949eb225bd8b66a4c9b6865729a786f2aa79b72e9d"}, + {file = "aiohttp-3.9.3-cp39-cp39-win32.whl", hash = "sha256:f7217af2e14da0856e082e96ff637f14ae45c10a5714b63c77f26d8884cf1051"}, + {file = "aiohttp-3.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:27468897f628c627230dba07ec65dc8d0db566923c48f29e084ce382119802bc"}, + {file = "aiohttp-3.9.3.tar.gz", hash = "sha256:90842933e5d1ff760fae6caca4b2b3edba53ba8f4b71e95dacf2818a2aca06f7"}, +] + +[package.dependencies] +aiosignal = ">=1.1.2" +async-timeout = {version = ">=4.0,<5.0", markers = "python_version 
< \"3.11\""} +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns", "brotlicffi"] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "anyio" +version = "4.2.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.8" +files = [ + {file = "anyio-4.2.0-py3-none-any.whl", hash = "sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee"}, + {file = "anyio-4.2.0.tar.gz", hash = "sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} + +[package.extras] +doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (>=0.23)"] + +[[package]] +name = "asgiref" +version = "3.7.2" +description = "ASGI specs, helper code, and adapters" +optional = false +python-versions = ">=3.7" +files = [ + {file = "asgiref-3.7.2-py3-none-any.whl", hash = "sha256:89b2ef2247e3b562a16eef663bc0e2e703ec6468e2fa8a5cd61cd449786d4f6e"}, + {file = "asgiref-3.7.2.tar.gz", hash = 
"sha256:9e0ce3aa93a819ba5b45120216b23878cf6e8525eb3848653452b4192b92afed"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4", markers = "python_version < \"3.11\""} + +[package.extras] +tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"] + +[[package]] +name = "asttokens" +version = "2.4.1" +description = "Annotate AST trees with source code positions" +optional = true +python-versions = "*" +files = [ + {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"}, + {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"}, +] + +[package.dependencies] +six = ">=1.12.0" + +[package.extras] +astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"] +test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] + +[[package]] +name = "async-timeout" +version = "4.0.3" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, + {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, +] + +[[package]] +name = "asynctest" +version = "0.13.0" +description = "Enhance the standard unittest package with features for testing asyncio libraries" +optional = false +python-versions = ">=3.5" +files = [ + {file = "asynctest-0.13.0-py3-none-any.whl", hash = "sha256:5da6118a7e6d6b54d83a8f7197769d046922a44d2a99c21382f0a6e4fadae676"}, + {file = "asynctest-0.13.0.tar.gz", hash = "sha256:c27862842d15d83e6a34eb0b2866c323880eb3a75e4485b079ea11748fd77fac"}, +] + +[[package]] +name = "attrs" +version = "23.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.2.0-py3-none-any.whl", hash = 
"sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, + {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] +tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] + +[[package]] +name = "auto_gpt_plugin_template" +version = "0.0.2" +description = "The template plugin for Auto-GPT." +optional = false +python-versions = ">=3.8" +files = [] +develop = false + +[package.dependencies] +abstract-singleton = "*" + +[package.source] +type = "git" +url = "https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template" +reference = "0.1.0" +resolved_reference = "7612a14c629dc64ad870eee4d05850d60e1dd9ce" + +[[package]] +name = "autoflake" +version = "2.2.1" +description = "Removes unused imports and unused variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "autoflake-2.2.1-py3-none-any.whl", hash = "sha256:265cde0a43c1f44ecfb4f30d95b0437796759d07be7706a2f70e4719234c0f79"}, + {file = "autoflake-2.2.1.tar.gz", hash = "sha256:62b7b6449a692c3c9b0c916919bbc21648da7281e8506bcf8d3f8280e431ebc1"}, +] + +[package.dependencies] +pyflakes = ">=3.0.0" +tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} + +[[package]] +name = "autogpt-forge" +version = "0.1.0" +description = "" +optional = false +python-versions = "^3.10" +files = [] +develop = false + +[package.dependencies] +aiohttp = "^3.8.5" +bs4 = "^0.0.1" +chromadb = "^0.4.10" +colorlog = "^6.7.0" +duckduckgo-search = "^5.0.0" +google-cloud-storage = "^2.13.0" +jinja2 = "^3.1.2" +litellm = "^1.17.9" 
+openai = "^1.7.2" +python-dotenv = "^1.0.0" +python-multipart = "^0.0.7" +selenium = "^4.13.0" +sqlalchemy = "^2.0.19" +tenacity = "^8.2.2" +toml = "^0.10.2" +uvicorn = "^0.23.2" +webdriver-manager = "^4.0.1" + +[package.extras] +benchmark = ["agbenchmark @ git+https://github.com/Significant-Gravitas/AutoGPT.git#subdirectory=benchmark"] + +[package.source] +type = "git" +url = "https://github.com/Significant-Gravitas/AutoGPT.git" +reference = "ab05b7ae70754c063909" +resolved_reference = "ab05b7ae70754c06390982d237d86dc7290cd1aa" +subdirectory = "autogpts/forge" + +[[package]] +name = "backoff" +version = "2.2.1" +description = "Function decoration for backoff and retry" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, + {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, +] + +[[package]] +name = "bcrypt" +version = "4.1.2" +description = "Modern password hashing for your software and your servers" +optional = false +python-versions = ">=3.7" +files = [ + {file = "bcrypt-4.1.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:ac621c093edb28200728a9cca214d7e838529e557027ef0581685909acd28b5e"}, + {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea505c97a5c465ab8c3ba75c0805a102ce526695cd6818c6de3b1a38f6f60da1"}, + {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57fa9442758da926ed33a91644649d3e340a71e2d0a5a8de064fb621fd5a3326"}, + {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:eb3bd3321517916696233b5e0c67fd7d6281f0ef48e66812db35fc963a422a1c"}, + {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6cad43d8c63f34b26aef462b6f5e44fdcf9860b723d2453b5d391258c4c8e966"}, + {file = 
"bcrypt-4.1.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:44290ccc827d3a24604f2c8bcd00d0da349e336e6503656cb8192133e27335e2"}, + {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:732b3920a08eacf12f93e6b04ea276c489f1c8fb49344f564cca2adb663b3e4c"}, + {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:1c28973decf4e0e69cee78c68e30a523be441972c826703bb93099868a8ff5b5"}, + {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b8df79979c5bae07f1db22dcc49cc5bccf08a0380ca5c6f391cbb5790355c0b0"}, + {file = "bcrypt-4.1.2-cp37-abi3-win32.whl", hash = "sha256:fbe188b878313d01b7718390f31528be4010fed1faa798c5a1d0469c9c48c369"}, + {file = "bcrypt-4.1.2-cp37-abi3-win_amd64.whl", hash = "sha256:9800ae5bd5077b13725e2e3934aa3c9c37e49d3ea3d06318010aa40f54c63551"}, + {file = "bcrypt-4.1.2-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:71b8be82bc46cedd61a9f4ccb6c1a493211d031415a34adde3669ee1b0afbb63"}, + {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e3c6642077b0c8092580c819c1684161262b2e30c4f45deb000c38947bf483"}, + {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:387e7e1af9a4dd636b9505a465032f2f5cb8e61ba1120e79a0e1cd0b512f3dfc"}, + {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f70d9c61f9c4ca7d57f3bfe88a5ccf62546ffbadf3681bb1e268d9d2e41c91a7"}, + {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2a298db2a8ab20056120b45e86c00a0a5eb50ec4075b6142db35f593b97cb3fb"}, + {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ba55e40de38a24e2d78d34c2d36d6e864f93e0d79d0b6ce915e4335aa81d01b1"}, + {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:3566a88234e8de2ccae31968127b0ecccbb4cddb629da744165db72b58d88ca4"}, + {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = 
"sha256:b90e216dc36864ae7132cb151ffe95155a37a14e0de3a8f64b49655dd959ff9c"}, + {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:69057b9fc5093ea1ab00dd24ede891f3e5e65bee040395fb1e66ee196f9c9b4a"}, + {file = "bcrypt-4.1.2-cp39-abi3-win32.whl", hash = "sha256:02d9ef8915f72dd6daaef40e0baeef8a017ce624369f09754baf32bb32dba25f"}, + {file = "bcrypt-4.1.2-cp39-abi3-win_amd64.whl", hash = "sha256:be3ab1071662f6065899fe08428e45c16aa36e28bc42921c4901a191fda6ee42"}, + {file = "bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d75fc8cd0ba23f97bae88a6ec04e9e5351ff3c6ad06f38fe32ba50cbd0d11946"}, + {file = "bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:a97e07e83e3262599434816f631cc4c7ca2aa8e9c072c1b1a7fec2ae809a1d2d"}, + {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e51c42750b7585cee7892c2614be0d14107fad9581d1738d954a262556dd1aab"}, + {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba4e4cc26610581a6329b3937e02d319f5ad4b85b074846bf4fef8a8cf51e7bb"}, + {file = "bcrypt-4.1.2.tar.gz", hash = "sha256:33313a1200a3ae90b75587ceac502b048b840fc69e7f7a0905b5f87fac7a1258"}, +] + +[package.extras] +tests = ["pytest (>=3.2.1,!=3.3.0)"] +typecheck = ["mypy"] + +[[package]] +name = "beautifulsoup4" +version = "4.12.2" +description = "Screen-scraping library" +optional = false +python-versions = ">=3.6.0" +files = [ + {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"}, + {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"}, +] + +[package.dependencies] +soupsieve = ">1.2" + +[package.extras] +html5lib = ["html5lib"] +lxml = ["lxml"] + +[[package]] +name = "black" +version = "23.12.1" +description = "The uncompromising code formatter." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "black-23.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0aaf6041986767a5e0ce663c7a2f0e9eaf21e6ff87a5f95cbf3675bfd4c41d2"}, + {file = "black-23.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c88b3711d12905b74206227109272673edce0cb29f27e1385f33b0163c414bba"}, + {file = "black-23.12.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a920b569dc6b3472513ba6ddea21f440d4b4c699494d2e972a1753cdc25df7b0"}, + {file = "black-23.12.1-cp310-cp310-win_amd64.whl", hash = "sha256:3fa4be75ef2a6b96ea8d92b1587dd8cb3a35c7e3d51f0738ced0781c3aa3a5a3"}, + {file = "black-23.12.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8d4df77958a622f9b5a4c96edb4b8c0034f8434032ab11077ec6c56ae9f384ba"}, + {file = "black-23.12.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:602cfb1196dc692424c70b6507593a2b29aac0547c1be9a1d1365f0d964c353b"}, + {file = "black-23.12.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c4352800f14be5b4864016882cdba10755bd50805c95f728011bcb47a4afd59"}, + {file = "black-23.12.1-cp311-cp311-win_amd64.whl", hash = "sha256:0808494f2b2df923ffc5723ed3c7b096bd76341f6213989759287611e9837d50"}, + {file = "black-23.12.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:25e57fd232a6d6ff3f4478a6fd0580838e47c93c83eaf1ccc92d4faf27112c4e"}, + {file = "black-23.12.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2d9e13db441c509a3763a7a3d9a49ccc1b4e974a47be4e08ade2a228876500ec"}, + {file = "black-23.12.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d1bd9c210f8b109b1762ec9fd36592fdd528485aadb3f5849b2740ef17e674e"}, + {file = "black-23.12.1-cp312-cp312-win_amd64.whl", hash = "sha256:ae76c22bde5cbb6bfd211ec343ded2163bba7883c7bc77f6b756a1049436fbb9"}, + {file = "black-23.12.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1fa88a0f74e50e4487477bc0bb900c6781dbddfdfa32691e780bf854c3b4a47f"}, + {file = 
"black-23.12.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a4d6a9668e45ad99d2f8ec70d5c8c04ef4f32f648ef39048d010b0689832ec6d"}, + {file = "black-23.12.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b18fb2ae6c4bb63eebe5be6bd869ba2f14fd0259bda7d18a46b764d8fb86298a"}, + {file = "black-23.12.1-cp38-cp38-win_amd64.whl", hash = "sha256:c04b6d9d20e9c13f43eee8ea87d44156b8505ca8a3c878773f68b4e4812a421e"}, + {file = "black-23.12.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e1b38b3135fd4c025c28c55ddfc236b05af657828a8a6abe5deec419a0b7055"}, + {file = "black-23.12.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4f0031eaa7b921db76decd73636ef3a12c942ed367d8c3841a0739412b260a54"}, + {file = "black-23.12.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97e56155c6b737854e60a9ab1c598ff2533d57e7506d97af5481141671abf3ea"}, + {file = "black-23.12.1-cp39-cp39-win_amd64.whl", hash = "sha256:dd15245c8b68fe2b6bd0f32c1556509d11bb33aec9b5d0866dd8e2ed3dba09c2"}, + {file = "black-23.12.1-py3-none-any.whl", hash = "sha256:78baad24af0f033958cad29731e27363183e140962595def56423e626f4bee3e"}, + {file = "black-23.12.1.tar.gz", hash = "sha256:4ce3ef14ebe8d9509188014d96af1c456a910d5b5cbf434a09fef7e024b3d0d5"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "blis" +version = "0.7.11" +description = "The Blis BLAS-like linear algebra library, as a self-contained C-extension." 
+optional = false +python-versions = "*" +files = [ + {file = "blis-0.7.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cd5fba34c5775e4c440d80e4dea8acb40e2d3855b546e07c4e21fad8f972404c"}, + {file = "blis-0.7.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:31273d9086cab9c56986d478e3ed6da6752fa4cdd0f7b5e8e5db30827912d90d"}, + {file = "blis-0.7.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d06883f83d4c8de8264154f7c4a420b4af323050ed07398c1ff201c34c25c0d2"}, + {file = "blis-0.7.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee493683e3043650d4413d531e79e580d28a3c7bdd184f1b9cfa565497bda1e7"}, + {file = "blis-0.7.11-cp310-cp310-win_amd64.whl", hash = "sha256:a73945a9d635eea528bccfdfcaa59dd35bd5f82a4a40d5ca31f08f507f3a6f81"}, + {file = "blis-0.7.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1b68df4d01d62f9adaef3dad6f96418787265a6878891fc4e0fabafd6d02afba"}, + {file = "blis-0.7.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:162e60d941a8151418d558a94ee5547cb1bbeed9f26b3b6f89ec9243f111a201"}, + {file = "blis-0.7.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:686a7d0111d5ba727cd62f374748952fd6eb74701b18177f525b16209a253c01"}, + {file = "blis-0.7.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0421d6e44cda202b113a34761f9a062b53f8c2ae8e4ec8325a76e709fca93b6e"}, + {file = "blis-0.7.11-cp311-cp311-win_amd64.whl", hash = "sha256:0dc9dcb3843045b6b8b00432409fd5ee96b8344a324e031bfec7303838c41a1a"}, + {file = "blis-0.7.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dadf8713ea51d91444d14ad4104a5493fa7ecc401bbb5f4a203ff6448fadb113"}, + {file = "blis-0.7.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5bcdaf370f03adaf4171d6405a89fa66cb3c09399d75fc02e1230a78cd2759e4"}, + {file = "blis-0.7.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:7de19264b1d49a178bf8035406d0ae77831f3bfaa3ce02942964a81a202abb03"}, + {file = "blis-0.7.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ea55c6a4a60fcbf6a0fdce40df6e254451ce636988323a34b9c94b583fc11e5"}, + {file = "blis-0.7.11-cp312-cp312-win_amd64.whl", hash = "sha256:5a305dbfc96d202a20d0edd6edf74a406b7e1404f4fa4397d24c68454e60b1b4"}, + {file = "blis-0.7.11-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:68544a1cbc3564db7ba54d2bf8988356b8c7acd025966e8e9313561b19f0fe2e"}, + {file = "blis-0.7.11-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:075431b13b9dd7b411894d4afbd4212acf4d0f56c5a20628f4b34902e90225f1"}, + {file = "blis-0.7.11-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:324fdf62af9075831aa62b51481960e8465674b7723f977684e32af708bb7448"}, + {file = "blis-0.7.11-cp36-cp36m-win_amd64.whl", hash = "sha256:afebdb02d2dcf9059f23ce1244585d3ce7e95c02a77fd45a500e4a55b7b23583"}, + {file = "blis-0.7.11-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2e62cd14b20e960f21547fee01f3a0b2ac201034d819842865a667c969c355d1"}, + {file = "blis-0.7.11-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b01c05a5754edc0b9a3b69be52cbee03f645b2ec69651d12216ea83b8122f0"}, + {file = "blis-0.7.11-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfee5ec52ba1e9002311d9191f7129d7b0ecdff211e88536fb24c865d102b50d"}, + {file = "blis-0.7.11-cp37-cp37m-win_amd64.whl", hash = "sha256:844b6377e3e7f3a2e92e7333cc644095386548ad5a027fdc150122703c009956"}, + {file = "blis-0.7.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6df00c24128e323174cde5d80ebe3657df39615322098ce06613845433057614"}, + {file = "blis-0.7.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:809d1da1331108935bf06e22f3cf07ef73a41a572ecd81575bdedb67defe3465"}, + {file = "blis-0.7.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:bfabd5272bbbe504702b8dfe30093653d278057656126716ff500d9c184b35a6"}, + {file = "blis-0.7.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca684f5c2f05269f17aefe7812360286e9a1cee3afb96d416485efd825dbcf19"}, + {file = "blis-0.7.11-cp38-cp38-win_amd64.whl", hash = "sha256:688a8b21d2521c2124ee8dfcbaf2c385981ccc27e313e052113d5db113e27d3b"}, + {file = "blis-0.7.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2ff7abd784033836b284ff9f4d0d7cb0737b7684daebb01a4c9fe145ffa5a31e"}, + {file = "blis-0.7.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f9caffcd14795bfe52add95a0dd8426d44e737b55fcb69e2b797816f4da0b1d2"}, + {file = "blis-0.7.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fb36989ed61233cfd48915896802ee6d3d87882190000f8cfe0cf4a3819f9a8"}, + {file = "blis-0.7.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ea09f961871f880d5dc622dce6c370e4859559f0ead897ae9b20ddafd6b07a2"}, + {file = "blis-0.7.11-cp39-cp39-win_amd64.whl", hash = "sha256:5bb38adabbb22f69f22c74bad025a010ae3b14de711bf5c715353980869d491d"}, + {file = "blis-0.7.11.tar.gz", hash = "sha256:cec6d48f75f7ac328ae1b6fbb372dde8c8a57c89559172277f66e01ff08d4d42"}, +] + +[package.dependencies] +numpy = {version = ">=1.19.0", markers = "python_version >= \"3.9\""} + +[[package]] +name = "boto3" +version = "1.34.20" +description = "The AWS SDK for Python" +optional = false +python-versions = ">= 3.8" +files = [ + {file = "boto3-1.34.20-py3-none-any.whl", hash = "sha256:a21da54634bd09dcad9e80d106512b6aabe493b1d4260688180156ef27afedc9"}, + {file = "boto3-1.34.20.tar.gz", hash = "sha256:7f662b0c833e7a4d1272b7ec60ded3f14affd54d08620b708ba3abeb0e49d15e"}, +] + +[package.dependencies] +botocore = ">=1.34.20,<1.35.0" +jmespath = ">=0.7.1,<2.0.0" +s3transfer = ">=0.10.0,<0.11.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "boto3-stubs" +version = "1.34.20" +description = "Type 
annotations for boto3 1.34.20 generated with mypy-boto3-builder 7.23.1" +optional = false +python-versions = ">=3.8" +files = [ + {file = "boto3-stubs-1.34.20.tar.gz", hash = "sha256:431851ea82b8596f8fa49de7014554ae0f2fa2cc6a0954459787a3b938796c68"}, + {file = "boto3_stubs-1.34.20-py3-none-any.whl", hash = "sha256:718b4503625fc5c2cada3b8e78fa10273dfb07359261be6fc07b89e2b6e03cb0"}, +] + +[package.dependencies] +botocore-stubs = "*" +mypy-boto3-s3 = {version = ">=1.34.0,<1.35.0", optional = true, markers = "extra == \"s3\""} +types-s3transfer = "*" +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} + +[package.extras] +accessanalyzer = ["mypy-boto3-accessanalyzer (>=1.34.0,<1.35.0)"] +account = ["mypy-boto3-account (>=1.34.0,<1.35.0)"] +acm = ["mypy-boto3-acm (>=1.34.0,<1.35.0)"] +acm-pca = ["mypy-boto3-acm-pca (>=1.34.0,<1.35.0)"] +alexaforbusiness = ["mypy-boto3-alexaforbusiness (>=1.34.0,<1.35.0)"] +all = ["mypy-boto3-accessanalyzer (>=1.34.0,<1.35.0)", "mypy-boto3-account (>=1.34.0,<1.35.0)", "mypy-boto3-acm (>=1.34.0,<1.35.0)", "mypy-boto3-acm-pca (>=1.34.0,<1.35.0)", "mypy-boto3-alexaforbusiness (>=1.34.0,<1.35.0)", "mypy-boto3-amp (>=1.34.0,<1.35.0)", "mypy-boto3-amplify (>=1.34.0,<1.35.0)", "mypy-boto3-amplifybackend (>=1.34.0,<1.35.0)", "mypy-boto3-amplifyuibuilder (>=1.34.0,<1.35.0)", "mypy-boto3-apigateway (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewaymanagementapi (>=1.34.0,<1.35.0)", "mypy-boto3-apigatewayv2 (>=1.34.0,<1.35.0)", "mypy-boto3-appconfig (>=1.34.0,<1.35.0)", "mypy-boto3-appconfigdata (>=1.34.0,<1.35.0)", "mypy-boto3-appfabric (>=1.34.0,<1.35.0)", "mypy-boto3-appflow (>=1.34.0,<1.35.0)", "mypy-boto3-appintegrations (>=1.34.0,<1.35.0)", "mypy-boto3-application-autoscaling (>=1.34.0,<1.35.0)", "mypy-boto3-application-insights (>=1.34.0,<1.35.0)", "mypy-boto3-applicationcostprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-appmesh (>=1.34.0,<1.35.0)", "mypy-boto3-apprunner (>=1.34.0,<1.35.0)", "mypy-boto3-appstream 
(>=1.34.0,<1.35.0)", "mypy-boto3-appsync (>=1.34.0,<1.35.0)", "mypy-boto3-arc-zonal-shift (>=1.34.0,<1.35.0)", "mypy-boto3-athena (>=1.34.0,<1.35.0)", "mypy-boto3-auditmanager (>=1.34.0,<1.35.0)", "mypy-boto3-autoscaling (>=1.34.0,<1.35.0)", "mypy-boto3-autoscaling-plans (>=1.34.0,<1.35.0)", "mypy-boto3-b2bi (>=1.34.0,<1.35.0)", "mypy-boto3-backup (>=1.34.0,<1.35.0)", "mypy-boto3-backup-gateway (>=1.34.0,<1.35.0)", "mypy-boto3-backupstorage (>=1.34.0,<1.35.0)", "mypy-boto3-batch (>=1.34.0,<1.35.0)", "mypy-boto3-bcm-data-exports (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-agent (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-agent-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-bedrock-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-billingconductor (>=1.34.0,<1.35.0)", "mypy-boto3-braket (>=1.34.0,<1.35.0)", "mypy-boto3-budgets (>=1.34.0,<1.35.0)", "mypy-boto3-ce (>=1.34.0,<1.35.0)", "mypy-boto3-chime (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-identity (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-meetings (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-messaging (>=1.34.0,<1.35.0)", "mypy-boto3-chime-sdk-voice (>=1.34.0,<1.35.0)", "mypy-boto3-cleanrooms (>=1.34.0,<1.35.0)", "mypy-boto3-cleanroomsml (>=1.34.0,<1.35.0)", "mypy-boto3-cloud9 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudcontrol (>=1.34.0,<1.35.0)", "mypy-boto3-clouddirectory (>=1.34.0,<1.35.0)", "mypy-boto3-cloudformation (>=1.34.0,<1.35.0)", "mypy-boto3-cloudfront (>=1.34.0,<1.35.0)", "mypy-boto3-cloudfront-keyvaluestore (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsm (>=1.34.0,<1.35.0)", "mypy-boto3-cloudhsmv2 (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearch (>=1.34.0,<1.35.0)", "mypy-boto3-cloudsearchdomain (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail (>=1.34.0,<1.35.0)", "mypy-boto3-cloudtrail-data (>=1.34.0,<1.35.0)", "mypy-boto3-cloudwatch (>=1.34.0,<1.35.0)", "mypy-boto3-codeartifact (>=1.34.0,<1.35.0)", "mypy-boto3-codebuild (>=1.34.0,<1.35.0)", 
"mypy-boto3-codecatalyst (>=1.34.0,<1.35.0)", "mypy-boto3-codecommit (>=1.34.0,<1.35.0)", "mypy-boto3-codedeploy (>=1.34.0,<1.35.0)", "mypy-boto3-codeguru-reviewer (>=1.34.0,<1.35.0)", "mypy-boto3-codeguru-security (>=1.34.0,<1.35.0)", "mypy-boto3-codeguruprofiler (>=1.34.0,<1.35.0)", "mypy-boto3-codepipeline (>=1.34.0,<1.35.0)", "mypy-boto3-codestar (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-connections (>=1.34.0,<1.35.0)", "mypy-boto3-codestar-notifications (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-identity (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-idp (>=1.34.0,<1.35.0)", "mypy-boto3-cognito-sync (>=1.34.0,<1.35.0)", "mypy-boto3-comprehend (>=1.34.0,<1.35.0)", "mypy-boto3-comprehendmedical (>=1.34.0,<1.35.0)", "mypy-boto3-compute-optimizer (>=1.34.0,<1.35.0)", "mypy-boto3-config (>=1.34.0,<1.35.0)", "mypy-boto3-connect (>=1.34.0,<1.35.0)", "mypy-boto3-connect-contact-lens (>=1.34.0,<1.35.0)", "mypy-boto3-connectcampaigns (>=1.34.0,<1.35.0)", "mypy-boto3-connectcases (>=1.34.0,<1.35.0)", "mypy-boto3-connectparticipant (>=1.34.0,<1.35.0)", "mypy-boto3-controltower (>=1.34.0,<1.35.0)", "mypy-boto3-cost-optimization-hub (>=1.34.0,<1.35.0)", "mypy-boto3-cur (>=1.34.0,<1.35.0)", "mypy-boto3-customer-profiles (>=1.34.0,<1.35.0)", "mypy-boto3-databrew (>=1.34.0,<1.35.0)", "mypy-boto3-dataexchange (>=1.34.0,<1.35.0)", "mypy-boto3-datapipeline (>=1.34.0,<1.35.0)", "mypy-boto3-datasync (>=1.34.0,<1.35.0)", "mypy-boto3-datazone (>=1.34.0,<1.35.0)", "mypy-boto3-dax (>=1.34.0,<1.35.0)", "mypy-boto3-detective (>=1.34.0,<1.35.0)", "mypy-boto3-devicefarm (>=1.34.0,<1.35.0)", "mypy-boto3-devops-guru (>=1.34.0,<1.35.0)", "mypy-boto3-directconnect (>=1.34.0,<1.35.0)", "mypy-boto3-discovery (>=1.34.0,<1.35.0)", "mypy-boto3-dlm (>=1.34.0,<1.35.0)", "mypy-boto3-dms (>=1.34.0,<1.35.0)", "mypy-boto3-docdb (>=1.34.0,<1.35.0)", "mypy-boto3-docdb-elastic (>=1.34.0,<1.35.0)", "mypy-boto3-drs (>=1.34.0,<1.35.0)", "mypy-boto3-ds (>=1.34.0,<1.35.0)", "mypy-boto3-dynamodb (>=1.34.0,<1.35.0)", 
"mypy-boto3-dynamodbstreams (>=1.34.0,<1.35.0)", "mypy-boto3-ebs (>=1.34.0,<1.35.0)", "mypy-boto3-ec2 (>=1.34.0,<1.35.0)", "mypy-boto3-ec2-instance-connect (>=1.34.0,<1.35.0)", "mypy-boto3-ecr (>=1.34.0,<1.35.0)", "mypy-boto3-ecr-public (>=1.34.0,<1.35.0)", "mypy-boto3-ecs (>=1.34.0,<1.35.0)", "mypy-boto3-efs (>=1.34.0,<1.35.0)", "mypy-boto3-eks (>=1.34.0,<1.35.0)", "mypy-boto3-eks-auth (>=1.34.0,<1.35.0)", "mypy-boto3-elastic-inference (>=1.34.0,<1.35.0)", "mypy-boto3-elasticache (>=1.34.0,<1.35.0)", "mypy-boto3-elasticbeanstalk (>=1.34.0,<1.35.0)", "mypy-boto3-elastictranscoder (>=1.34.0,<1.35.0)", "mypy-boto3-elb (>=1.34.0,<1.35.0)", "mypy-boto3-elbv2 (>=1.34.0,<1.35.0)", "mypy-boto3-emr (>=1.34.0,<1.35.0)", "mypy-boto3-emr-containers (>=1.34.0,<1.35.0)", "mypy-boto3-emr-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-entityresolution (>=1.34.0,<1.35.0)", "mypy-boto3-es (>=1.34.0,<1.35.0)", "mypy-boto3-events (>=1.34.0,<1.35.0)", "mypy-boto3-evidently (>=1.34.0,<1.35.0)", "mypy-boto3-finspace (>=1.34.0,<1.35.0)", "mypy-boto3-finspace-data (>=1.34.0,<1.35.0)", "mypy-boto3-firehose (>=1.34.0,<1.35.0)", "mypy-boto3-fis (>=1.34.0,<1.35.0)", "mypy-boto3-fms (>=1.34.0,<1.35.0)", "mypy-boto3-forecast (>=1.34.0,<1.35.0)", "mypy-boto3-forecastquery (>=1.34.0,<1.35.0)", "mypy-boto3-frauddetector (>=1.34.0,<1.35.0)", "mypy-boto3-freetier (>=1.34.0,<1.35.0)", "mypy-boto3-fsx (>=1.34.0,<1.35.0)", "mypy-boto3-gamelift (>=1.34.0,<1.35.0)", "mypy-boto3-glacier (>=1.34.0,<1.35.0)", "mypy-boto3-globalaccelerator (>=1.34.0,<1.35.0)", "mypy-boto3-glue (>=1.34.0,<1.35.0)", "mypy-boto3-grafana (>=1.34.0,<1.35.0)", "mypy-boto3-greengrass (>=1.34.0,<1.35.0)", "mypy-boto3-greengrassv2 (>=1.34.0,<1.35.0)", "mypy-boto3-groundstation (>=1.34.0,<1.35.0)", "mypy-boto3-guardduty (>=1.34.0,<1.35.0)", "mypy-boto3-health (>=1.34.0,<1.35.0)", "mypy-boto3-healthlake (>=1.34.0,<1.35.0)", "mypy-boto3-honeycode (>=1.34.0,<1.35.0)", "mypy-boto3-iam (>=1.34.0,<1.35.0)", "mypy-boto3-identitystore 
(>=1.34.0,<1.35.0)", "mypy-boto3-imagebuilder (>=1.34.0,<1.35.0)", "mypy-boto3-importexport (>=1.34.0,<1.35.0)", "mypy-boto3-inspector (>=1.34.0,<1.35.0)", "mypy-boto3-inspector-scan (>=1.34.0,<1.35.0)", "mypy-boto3-inspector2 (>=1.34.0,<1.35.0)", "mypy-boto3-internetmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-iot (>=1.34.0,<1.35.0)", "mypy-boto3-iot-data (>=1.34.0,<1.35.0)", "mypy-boto3-iot-jobs-data (>=1.34.0,<1.35.0)", "mypy-boto3-iot-roborunner (>=1.34.0,<1.35.0)", "mypy-boto3-iot1click-devices (>=1.34.0,<1.35.0)", "mypy-boto3-iot1click-projects (>=1.34.0,<1.35.0)", "mypy-boto3-iotanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-iotdeviceadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents (>=1.34.0,<1.35.0)", "mypy-boto3-iotevents-data (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleethub (>=1.34.0,<1.35.0)", "mypy-boto3-iotfleetwise (>=1.34.0,<1.35.0)", "mypy-boto3-iotsecuretunneling (>=1.34.0,<1.35.0)", "mypy-boto3-iotsitewise (>=1.34.0,<1.35.0)", "mypy-boto3-iotthingsgraph (>=1.34.0,<1.35.0)", "mypy-boto3-iottwinmaker (>=1.34.0,<1.35.0)", "mypy-boto3-iotwireless (>=1.34.0,<1.35.0)", "mypy-boto3-ivs (>=1.34.0,<1.35.0)", "mypy-boto3-ivs-realtime (>=1.34.0,<1.35.0)", "mypy-boto3-ivschat (>=1.34.0,<1.35.0)", "mypy-boto3-kafka (>=1.34.0,<1.35.0)", "mypy-boto3-kafkaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-kendra (>=1.34.0,<1.35.0)", "mypy-boto3-kendra-ranking (>=1.34.0,<1.35.0)", "mypy-boto3-keyspaces (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-archived-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-media (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-signaling (>=1.34.0,<1.35.0)", "mypy-boto3-kinesis-video-webrtc-storage (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.34.0,<1.35.0)", "mypy-boto3-kinesisvideo (>=1.34.0,<1.35.0)", "mypy-boto3-kms (>=1.34.0,<1.35.0)", "mypy-boto3-lakeformation (>=1.34.0,<1.35.0)", "mypy-boto3-lambda (>=1.34.0,<1.35.0)", 
"mypy-boto3-launch-wizard (>=1.34.0,<1.35.0)", "mypy-boto3-lex-models (>=1.34.0,<1.35.0)", "mypy-boto3-lex-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-models (>=1.34.0,<1.35.0)", "mypy-boto3-lexv2-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-linux-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.34.0,<1.35.0)", "mypy-boto3-lightsail (>=1.34.0,<1.35.0)", "mypy-boto3-location (>=1.34.0,<1.35.0)", "mypy-boto3-logs (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutequipment (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutmetrics (>=1.34.0,<1.35.0)", "mypy-boto3-lookoutvision (>=1.34.0,<1.35.0)", "mypy-boto3-m2 (>=1.34.0,<1.35.0)", "mypy-boto3-machinelearning (>=1.34.0,<1.35.0)", "mypy-boto3-macie2 (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain (>=1.34.0,<1.35.0)", "mypy-boto3-managedblockchain-query (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-agreement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-catalog (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-deployment (>=1.34.0,<1.35.0)", "mypy-boto3-marketplace-entitlement (>=1.34.0,<1.35.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconnect (>=1.34.0,<1.35.0)", "mypy-boto3-mediaconvert (>=1.34.0,<1.35.0)", "mypy-boto3-medialive (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackage-vod (>=1.34.0,<1.35.0)", "mypy-boto3-mediapackagev2 (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore (>=1.34.0,<1.35.0)", "mypy-boto3-mediastore-data (>=1.34.0,<1.35.0)", "mypy-boto3-mediatailor (>=1.34.0,<1.35.0)", "mypy-boto3-medical-imaging (>=1.34.0,<1.35.0)", "mypy-boto3-memorydb (>=1.34.0,<1.35.0)", "mypy-boto3-meteringmarketplace (>=1.34.0,<1.35.0)", "mypy-boto3-mgh (>=1.34.0,<1.35.0)", "mypy-boto3-mgn (>=1.34.0,<1.35.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhub-config (>=1.34.0,<1.35.0)", "mypy-boto3-migrationhuborchestrator 
(>=1.34.0,<1.35.0)", "mypy-boto3-migrationhubstrategy (>=1.34.0,<1.35.0)", "mypy-boto3-mobile (>=1.34.0,<1.35.0)", "mypy-boto3-mq (>=1.34.0,<1.35.0)", "mypy-boto3-mturk (>=1.34.0,<1.35.0)", "mypy-boto3-mwaa (>=1.34.0,<1.35.0)", "mypy-boto3-neptune (>=1.34.0,<1.35.0)", "mypy-boto3-neptune-graph (>=1.34.0,<1.35.0)", "mypy-boto3-neptunedata (>=1.34.0,<1.35.0)", "mypy-boto3-network-firewall (>=1.34.0,<1.35.0)", "mypy-boto3-networkmanager (>=1.34.0,<1.35.0)", "mypy-boto3-networkmonitor (>=1.34.0,<1.35.0)", "mypy-boto3-nimble (>=1.34.0,<1.35.0)", "mypy-boto3-oam (>=1.34.0,<1.35.0)", "mypy-boto3-omics (>=1.34.0,<1.35.0)", "mypy-boto3-opensearch (>=1.34.0,<1.35.0)", "mypy-boto3-opensearchserverless (>=1.34.0,<1.35.0)", "mypy-boto3-opsworks (>=1.34.0,<1.35.0)", "mypy-boto3-opsworkscm (>=1.34.0,<1.35.0)", "mypy-boto3-organizations (>=1.34.0,<1.35.0)", "mypy-boto3-osis (>=1.34.0,<1.35.0)", "mypy-boto3-outposts (>=1.34.0,<1.35.0)", "mypy-boto3-panorama (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography (>=1.34.0,<1.35.0)", "mypy-boto3-payment-cryptography-data (>=1.34.0,<1.35.0)", "mypy-boto3-pca-connector-ad (>=1.34.0,<1.35.0)", "mypy-boto3-personalize (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-events (>=1.34.0,<1.35.0)", "mypy-boto3-personalize-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-pi (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-email (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.34.0,<1.35.0)", "mypy-boto3-pipes (>=1.34.0,<1.35.0)", "mypy-boto3-polly (>=1.34.0,<1.35.0)", "mypy-boto3-pricing (>=1.34.0,<1.35.0)", "mypy-boto3-privatenetworks (>=1.34.0,<1.35.0)", "mypy-boto3-proton (>=1.34.0,<1.35.0)", "mypy-boto3-qbusiness (>=1.34.0,<1.35.0)", "mypy-boto3-qconnect (>=1.34.0,<1.35.0)", "mypy-boto3-qldb (>=1.34.0,<1.35.0)", "mypy-boto3-qldb-session (>=1.34.0,<1.35.0)", "mypy-boto3-quicksight (>=1.34.0,<1.35.0)", "mypy-boto3-ram (>=1.34.0,<1.35.0)", "mypy-boto3-rbin 
(>=1.34.0,<1.35.0)", "mypy-boto3-rds (>=1.34.0,<1.35.0)", "mypy-boto3-rds-data (>=1.34.0,<1.35.0)", "mypy-boto3-redshift (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-data (>=1.34.0,<1.35.0)", "mypy-boto3-redshift-serverless (>=1.34.0,<1.35.0)", "mypy-boto3-rekognition (>=1.34.0,<1.35.0)", "mypy-boto3-repostspace (>=1.34.0,<1.35.0)", "mypy-boto3-resiliencehub (>=1.34.0,<1.35.0)", "mypy-boto3-resource-explorer-2 (>=1.34.0,<1.35.0)", "mypy-boto3-resource-groups (>=1.34.0,<1.35.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.34.0,<1.35.0)", "mypy-boto3-robomaker (>=1.34.0,<1.35.0)", "mypy-boto3-rolesanywhere (>=1.34.0,<1.35.0)", "mypy-boto3-route53 (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-cluster (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-control-config (>=1.34.0,<1.35.0)", "mypy-boto3-route53-recovery-readiness (>=1.34.0,<1.35.0)", "mypy-boto3-route53domains (>=1.34.0,<1.35.0)", "mypy-boto3-route53resolver (>=1.34.0,<1.35.0)", "mypy-boto3-rum (>=1.34.0,<1.35.0)", "mypy-boto3-s3 (>=1.34.0,<1.35.0)", "mypy-boto3-s3control (>=1.34.0,<1.35.0)", "mypy-boto3-s3outposts (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-edge (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-geospatial (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-metrics (>=1.34.0,<1.35.0)", "mypy-boto3-sagemaker-runtime (>=1.34.0,<1.35.0)", "mypy-boto3-savingsplans (>=1.34.0,<1.35.0)", "mypy-boto3-scheduler (>=1.34.0,<1.35.0)", "mypy-boto3-schemas (>=1.34.0,<1.35.0)", "mypy-boto3-sdb (>=1.34.0,<1.35.0)", "mypy-boto3-secretsmanager (>=1.34.0,<1.35.0)", "mypy-boto3-securityhub (>=1.34.0,<1.35.0)", "mypy-boto3-securitylake (>=1.34.0,<1.35.0)", "mypy-boto3-serverlessrepo (>=1.34.0,<1.35.0)", "mypy-boto3-service-quotas (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog (>=1.34.0,<1.35.0)", "mypy-boto3-servicecatalog-appregistry (>=1.34.0,<1.35.0)", 
"mypy-boto3-servicediscovery (>=1.34.0,<1.35.0)", "mypy-boto3-ses (>=1.34.0,<1.35.0)", "mypy-boto3-sesv2 (>=1.34.0,<1.35.0)", "mypy-boto3-shield (>=1.34.0,<1.35.0)", "mypy-boto3-signer (>=1.34.0,<1.35.0)", "mypy-boto3-simspaceweaver (>=1.34.0,<1.35.0)", "mypy-boto3-sms (>=1.34.0,<1.35.0)", "mypy-boto3-sms-voice (>=1.34.0,<1.35.0)", "mypy-boto3-snow-device-management (>=1.34.0,<1.35.0)", "mypy-boto3-snowball (>=1.34.0,<1.35.0)", "mypy-boto3-sns (>=1.34.0,<1.35.0)", "mypy-boto3-sqs (>=1.34.0,<1.35.0)", "mypy-boto3-ssm (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-contacts (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-incidents (>=1.34.0,<1.35.0)", "mypy-boto3-ssm-sap (>=1.34.0,<1.35.0)", "mypy-boto3-sso (>=1.34.0,<1.35.0)", "mypy-boto3-sso-admin (>=1.34.0,<1.35.0)", "mypy-boto3-sso-oidc (>=1.34.0,<1.35.0)", "mypy-boto3-stepfunctions (>=1.34.0,<1.35.0)", "mypy-boto3-storagegateway (>=1.34.0,<1.35.0)", "mypy-boto3-sts (>=1.34.0,<1.35.0)", "mypy-boto3-supplychain (>=1.34.0,<1.35.0)", "mypy-boto3-support (>=1.34.0,<1.35.0)", "mypy-boto3-support-app (>=1.34.0,<1.35.0)", "mypy-boto3-swf (>=1.34.0,<1.35.0)", "mypy-boto3-synthetics (>=1.34.0,<1.35.0)", "mypy-boto3-textract (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-query (>=1.34.0,<1.35.0)", "mypy-boto3-timestream-write (>=1.34.0,<1.35.0)", "mypy-boto3-tnb (>=1.34.0,<1.35.0)", "mypy-boto3-transcribe (>=1.34.0,<1.35.0)", "mypy-boto3-transfer (>=1.34.0,<1.35.0)", "mypy-boto3-translate (>=1.34.0,<1.35.0)", "mypy-boto3-trustedadvisor (>=1.34.0,<1.35.0)", "mypy-boto3-verifiedpermissions (>=1.34.0,<1.35.0)", "mypy-boto3-voice-id (>=1.34.0,<1.35.0)", "mypy-boto3-vpc-lattice (>=1.34.0,<1.35.0)", "mypy-boto3-waf (>=1.34.0,<1.35.0)", "mypy-boto3-waf-regional (>=1.34.0,<1.35.0)", "mypy-boto3-wafv2 (>=1.34.0,<1.35.0)", "mypy-boto3-wellarchitected (>=1.34.0,<1.35.0)", "mypy-boto3-wisdom (>=1.34.0,<1.35.0)", "mypy-boto3-workdocs (>=1.34.0,<1.35.0)", "mypy-boto3-worklink (>=1.34.0,<1.35.0)", "mypy-boto3-workmail (>=1.34.0,<1.35.0)", 
"mypy-boto3-workmailmessageflow (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-thin-client (>=1.34.0,<1.35.0)", "mypy-boto3-workspaces-web (>=1.34.0,<1.35.0)", "mypy-boto3-xray (>=1.34.0,<1.35.0)"] +amp = ["mypy-boto3-amp (>=1.34.0,<1.35.0)"] +amplify = ["mypy-boto3-amplify (>=1.34.0,<1.35.0)"] +amplifybackend = ["mypy-boto3-amplifybackend (>=1.34.0,<1.35.0)"] +amplifyuibuilder = ["mypy-boto3-amplifyuibuilder (>=1.34.0,<1.35.0)"] +apigateway = ["mypy-boto3-apigateway (>=1.34.0,<1.35.0)"] +apigatewaymanagementapi = ["mypy-boto3-apigatewaymanagementapi (>=1.34.0,<1.35.0)"] +apigatewayv2 = ["mypy-boto3-apigatewayv2 (>=1.34.0,<1.35.0)"] +appconfig = ["mypy-boto3-appconfig (>=1.34.0,<1.35.0)"] +appconfigdata = ["mypy-boto3-appconfigdata (>=1.34.0,<1.35.0)"] +appfabric = ["mypy-boto3-appfabric (>=1.34.0,<1.35.0)"] +appflow = ["mypy-boto3-appflow (>=1.34.0,<1.35.0)"] +appintegrations = ["mypy-boto3-appintegrations (>=1.34.0,<1.35.0)"] +application-autoscaling = ["mypy-boto3-application-autoscaling (>=1.34.0,<1.35.0)"] +application-insights = ["mypy-boto3-application-insights (>=1.34.0,<1.35.0)"] +applicationcostprofiler = ["mypy-boto3-applicationcostprofiler (>=1.34.0,<1.35.0)"] +appmesh = ["mypy-boto3-appmesh (>=1.34.0,<1.35.0)"] +apprunner = ["mypy-boto3-apprunner (>=1.34.0,<1.35.0)"] +appstream = ["mypy-boto3-appstream (>=1.34.0,<1.35.0)"] +appsync = ["mypy-boto3-appsync (>=1.34.0,<1.35.0)"] +arc-zonal-shift = ["mypy-boto3-arc-zonal-shift (>=1.34.0,<1.35.0)"] +athena = ["mypy-boto3-athena (>=1.34.0,<1.35.0)"] +auditmanager = ["mypy-boto3-auditmanager (>=1.34.0,<1.35.0)"] +autoscaling = ["mypy-boto3-autoscaling (>=1.34.0,<1.35.0)"] +autoscaling-plans = ["mypy-boto3-autoscaling-plans (>=1.34.0,<1.35.0)"] +b2bi = ["mypy-boto3-b2bi (>=1.34.0,<1.35.0)"] +backup = ["mypy-boto3-backup (>=1.34.0,<1.35.0)"] +backup-gateway = ["mypy-boto3-backup-gateway (>=1.34.0,<1.35.0)"] +backupstorage = ["mypy-boto3-backupstorage (>=1.34.0,<1.35.0)"] 
+batch = ["mypy-boto3-batch (>=1.34.0,<1.35.0)"] +bcm-data-exports = ["mypy-boto3-bcm-data-exports (>=1.34.0,<1.35.0)"] +bedrock = ["mypy-boto3-bedrock (>=1.34.0,<1.35.0)"] +bedrock-agent = ["mypy-boto3-bedrock-agent (>=1.34.0,<1.35.0)"] +bedrock-agent-runtime = ["mypy-boto3-bedrock-agent-runtime (>=1.34.0,<1.35.0)"] +bedrock-runtime = ["mypy-boto3-bedrock-runtime (>=1.34.0,<1.35.0)"] +billingconductor = ["mypy-boto3-billingconductor (>=1.34.0,<1.35.0)"] +boto3 = ["boto3 (==1.34.20)", "botocore (==1.34.20)"] +braket = ["mypy-boto3-braket (>=1.34.0,<1.35.0)"] +budgets = ["mypy-boto3-budgets (>=1.34.0,<1.35.0)"] +ce = ["mypy-boto3-ce (>=1.34.0,<1.35.0)"] +chime = ["mypy-boto3-chime (>=1.34.0,<1.35.0)"] +chime-sdk-identity = ["mypy-boto3-chime-sdk-identity (>=1.34.0,<1.35.0)"] +chime-sdk-media-pipelines = ["mypy-boto3-chime-sdk-media-pipelines (>=1.34.0,<1.35.0)"] +chime-sdk-meetings = ["mypy-boto3-chime-sdk-meetings (>=1.34.0,<1.35.0)"] +chime-sdk-messaging = ["mypy-boto3-chime-sdk-messaging (>=1.34.0,<1.35.0)"] +chime-sdk-voice = ["mypy-boto3-chime-sdk-voice (>=1.34.0,<1.35.0)"] +cleanrooms = ["mypy-boto3-cleanrooms (>=1.34.0,<1.35.0)"] +cleanroomsml = ["mypy-boto3-cleanroomsml (>=1.34.0,<1.35.0)"] +cloud9 = ["mypy-boto3-cloud9 (>=1.34.0,<1.35.0)"] +cloudcontrol = ["mypy-boto3-cloudcontrol (>=1.34.0,<1.35.0)"] +clouddirectory = ["mypy-boto3-clouddirectory (>=1.34.0,<1.35.0)"] +cloudformation = ["mypy-boto3-cloudformation (>=1.34.0,<1.35.0)"] +cloudfront = ["mypy-boto3-cloudfront (>=1.34.0,<1.35.0)"] +cloudfront-keyvaluestore = ["mypy-boto3-cloudfront-keyvaluestore (>=1.34.0,<1.35.0)"] +cloudhsm = ["mypy-boto3-cloudhsm (>=1.34.0,<1.35.0)"] +cloudhsmv2 = ["mypy-boto3-cloudhsmv2 (>=1.34.0,<1.35.0)"] +cloudsearch = ["mypy-boto3-cloudsearch (>=1.34.0,<1.35.0)"] +cloudsearchdomain = ["mypy-boto3-cloudsearchdomain (>=1.34.0,<1.35.0)"] +cloudtrail = ["mypy-boto3-cloudtrail (>=1.34.0,<1.35.0)"] +cloudtrail-data = ["mypy-boto3-cloudtrail-data (>=1.34.0,<1.35.0)"] +cloudwatch 
= ["mypy-boto3-cloudwatch (>=1.34.0,<1.35.0)"] +codeartifact = ["mypy-boto3-codeartifact (>=1.34.0,<1.35.0)"] +codebuild = ["mypy-boto3-codebuild (>=1.34.0,<1.35.0)"] +codecatalyst = ["mypy-boto3-codecatalyst (>=1.34.0,<1.35.0)"] +codecommit = ["mypy-boto3-codecommit (>=1.34.0,<1.35.0)"] +codedeploy = ["mypy-boto3-codedeploy (>=1.34.0,<1.35.0)"] +codeguru-reviewer = ["mypy-boto3-codeguru-reviewer (>=1.34.0,<1.35.0)"] +codeguru-security = ["mypy-boto3-codeguru-security (>=1.34.0,<1.35.0)"] +codeguruprofiler = ["mypy-boto3-codeguruprofiler (>=1.34.0,<1.35.0)"] +codepipeline = ["mypy-boto3-codepipeline (>=1.34.0,<1.35.0)"] +codestar = ["mypy-boto3-codestar (>=1.34.0,<1.35.0)"] +codestar-connections = ["mypy-boto3-codestar-connections (>=1.34.0,<1.35.0)"] +codestar-notifications = ["mypy-boto3-codestar-notifications (>=1.34.0,<1.35.0)"] +cognito-identity = ["mypy-boto3-cognito-identity (>=1.34.0,<1.35.0)"] +cognito-idp = ["mypy-boto3-cognito-idp (>=1.34.0,<1.35.0)"] +cognito-sync = ["mypy-boto3-cognito-sync (>=1.34.0,<1.35.0)"] +comprehend = ["mypy-boto3-comprehend (>=1.34.0,<1.35.0)"] +comprehendmedical = ["mypy-boto3-comprehendmedical (>=1.34.0,<1.35.0)"] +compute-optimizer = ["mypy-boto3-compute-optimizer (>=1.34.0,<1.35.0)"] +config = ["mypy-boto3-config (>=1.34.0,<1.35.0)"] +connect = ["mypy-boto3-connect (>=1.34.0,<1.35.0)"] +connect-contact-lens = ["mypy-boto3-connect-contact-lens (>=1.34.0,<1.35.0)"] +connectcampaigns = ["mypy-boto3-connectcampaigns (>=1.34.0,<1.35.0)"] +connectcases = ["mypy-boto3-connectcases (>=1.34.0,<1.35.0)"] +connectparticipant = ["mypy-boto3-connectparticipant (>=1.34.0,<1.35.0)"] +controltower = ["mypy-boto3-controltower (>=1.34.0,<1.35.0)"] +cost-optimization-hub = ["mypy-boto3-cost-optimization-hub (>=1.34.0,<1.35.0)"] +cur = ["mypy-boto3-cur (>=1.34.0,<1.35.0)"] +customer-profiles = ["mypy-boto3-customer-profiles (>=1.34.0,<1.35.0)"] +databrew = ["mypy-boto3-databrew (>=1.34.0,<1.35.0)"] +dataexchange = ["mypy-boto3-dataexchange 
(>=1.34.0,<1.35.0)"] +datapipeline = ["mypy-boto3-datapipeline (>=1.34.0,<1.35.0)"] +datasync = ["mypy-boto3-datasync (>=1.34.0,<1.35.0)"] +datazone = ["mypy-boto3-datazone (>=1.34.0,<1.35.0)"] +dax = ["mypy-boto3-dax (>=1.34.0,<1.35.0)"] +detective = ["mypy-boto3-detective (>=1.34.0,<1.35.0)"] +devicefarm = ["mypy-boto3-devicefarm (>=1.34.0,<1.35.0)"] +devops-guru = ["mypy-boto3-devops-guru (>=1.34.0,<1.35.0)"] +directconnect = ["mypy-boto3-directconnect (>=1.34.0,<1.35.0)"] +discovery = ["mypy-boto3-discovery (>=1.34.0,<1.35.0)"] +dlm = ["mypy-boto3-dlm (>=1.34.0,<1.35.0)"] +dms = ["mypy-boto3-dms (>=1.34.0,<1.35.0)"] +docdb = ["mypy-boto3-docdb (>=1.34.0,<1.35.0)"] +docdb-elastic = ["mypy-boto3-docdb-elastic (>=1.34.0,<1.35.0)"] +drs = ["mypy-boto3-drs (>=1.34.0,<1.35.0)"] +ds = ["mypy-boto3-ds (>=1.34.0,<1.35.0)"] +dynamodb = ["mypy-boto3-dynamodb (>=1.34.0,<1.35.0)"] +dynamodbstreams = ["mypy-boto3-dynamodbstreams (>=1.34.0,<1.35.0)"] +ebs = ["mypy-boto3-ebs (>=1.34.0,<1.35.0)"] +ec2 = ["mypy-boto3-ec2 (>=1.34.0,<1.35.0)"] +ec2-instance-connect = ["mypy-boto3-ec2-instance-connect (>=1.34.0,<1.35.0)"] +ecr = ["mypy-boto3-ecr (>=1.34.0,<1.35.0)"] +ecr-public = ["mypy-boto3-ecr-public (>=1.34.0,<1.35.0)"] +ecs = ["mypy-boto3-ecs (>=1.34.0,<1.35.0)"] +efs = ["mypy-boto3-efs (>=1.34.0,<1.35.0)"] +eks = ["mypy-boto3-eks (>=1.34.0,<1.35.0)"] +eks-auth = ["mypy-boto3-eks-auth (>=1.34.0,<1.35.0)"] +elastic-inference = ["mypy-boto3-elastic-inference (>=1.34.0,<1.35.0)"] +elasticache = ["mypy-boto3-elasticache (>=1.34.0,<1.35.0)"] +elasticbeanstalk = ["mypy-boto3-elasticbeanstalk (>=1.34.0,<1.35.0)"] +elastictranscoder = ["mypy-boto3-elastictranscoder (>=1.34.0,<1.35.0)"] +elb = ["mypy-boto3-elb (>=1.34.0,<1.35.0)"] +elbv2 = ["mypy-boto3-elbv2 (>=1.34.0,<1.35.0)"] +emr = ["mypy-boto3-emr (>=1.34.0,<1.35.0)"] +emr-containers = ["mypy-boto3-emr-containers (>=1.34.0,<1.35.0)"] +emr-serverless = ["mypy-boto3-emr-serverless (>=1.34.0,<1.35.0)"] +entityresolution = 
["mypy-boto3-entityresolution (>=1.34.0,<1.35.0)"] +es = ["mypy-boto3-es (>=1.34.0,<1.35.0)"] +essential = ["mypy-boto3-cloudformation (>=1.34.0,<1.35.0)", "mypy-boto3-dynamodb (>=1.34.0,<1.35.0)", "mypy-boto3-ec2 (>=1.34.0,<1.35.0)", "mypy-boto3-lambda (>=1.34.0,<1.35.0)", "mypy-boto3-rds (>=1.34.0,<1.35.0)", "mypy-boto3-s3 (>=1.34.0,<1.35.0)", "mypy-boto3-sqs (>=1.34.0,<1.35.0)"] +events = ["mypy-boto3-events (>=1.34.0,<1.35.0)"] +evidently = ["mypy-boto3-evidently (>=1.34.0,<1.35.0)"] +finspace = ["mypy-boto3-finspace (>=1.34.0,<1.35.0)"] +finspace-data = ["mypy-boto3-finspace-data (>=1.34.0,<1.35.0)"] +firehose = ["mypy-boto3-firehose (>=1.34.0,<1.35.0)"] +fis = ["mypy-boto3-fis (>=1.34.0,<1.35.0)"] +fms = ["mypy-boto3-fms (>=1.34.0,<1.35.0)"] +forecast = ["mypy-boto3-forecast (>=1.34.0,<1.35.0)"] +forecastquery = ["mypy-boto3-forecastquery (>=1.34.0,<1.35.0)"] +frauddetector = ["mypy-boto3-frauddetector (>=1.34.0,<1.35.0)"] +freetier = ["mypy-boto3-freetier (>=1.34.0,<1.35.0)"] +fsx = ["mypy-boto3-fsx (>=1.34.0,<1.35.0)"] +gamelift = ["mypy-boto3-gamelift (>=1.34.0,<1.35.0)"] +glacier = ["mypy-boto3-glacier (>=1.34.0,<1.35.0)"] +globalaccelerator = ["mypy-boto3-globalaccelerator (>=1.34.0,<1.35.0)"] +glue = ["mypy-boto3-glue (>=1.34.0,<1.35.0)"] +grafana = ["mypy-boto3-grafana (>=1.34.0,<1.35.0)"] +greengrass = ["mypy-boto3-greengrass (>=1.34.0,<1.35.0)"] +greengrassv2 = ["mypy-boto3-greengrassv2 (>=1.34.0,<1.35.0)"] +groundstation = ["mypy-boto3-groundstation (>=1.34.0,<1.35.0)"] +guardduty = ["mypy-boto3-guardduty (>=1.34.0,<1.35.0)"] +health = ["mypy-boto3-health (>=1.34.0,<1.35.0)"] +healthlake = ["mypy-boto3-healthlake (>=1.34.0,<1.35.0)"] +honeycode = ["mypy-boto3-honeycode (>=1.34.0,<1.35.0)"] +iam = ["mypy-boto3-iam (>=1.34.0,<1.35.0)"] +identitystore = ["mypy-boto3-identitystore (>=1.34.0,<1.35.0)"] +imagebuilder = ["mypy-boto3-imagebuilder (>=1.34.0,<1.35.0)"] +importexport = ["mypy-boto3-importexport (>=1.34.0,<1.35.0)"] +inspector = 
["mypy-boto3-inspector (>=1.34.0,<1.35.0)"] +inspector-scan = ["mypy-boto3-inspector-scan (>=1.34.0,<1.35.0)"] +inspector2 = ["mypy-boto3-inspector2 (>=1.34.0,<1.35.0)"] +internetmonitor = ["mypy-boto3-internetmonitor (>=1.34.0,<1.35.0)"] +iot = ["mypy-boto3-iot (>=1.34.0,<1.35.0)"] +iot-data = ["mypy-boto3-iot-data (>=1.34.0,<1.35.0)"] +iot-jobs-data = ["mypy-boto3-iot-jobs-data (>=1.34.0,<1.35.0)"] +iot-roborunner = ["mypy-boto3-iot-roborunner (>=1.34.0,<1.35.0)"] +iot1click-devices = ["mypy-boto3-iot1click-devices (>=1.34.0,<1.35.0)"] +iot1click-projects = ["mypy-boto3-iot1click-projects (>=1.34.0,<1.35.0)"] +iotanalytics = ["mypy-boto3-iotanalytics (>=1.34.0,<1.35.0)"] +iotdeviceadvisor = ["mypy-boto3-iotdeviceadvisor (>=1.34.0,<1.35.0)"] +iotevents = ["mypy-boto3-iotevents (>=1.34.0,<1.35.0)"] +iotevents-data = ["mypy-boto3-iotevents-data (>=1.34.0,<1.35.0)"] +iotfleethub = ["mypy-boto3-iotfleethub (>=1.34.0,<1.35.0)"] +iotfleetwise = ["mypy-boto3-iotfleetwise (>=1.34.0,<1.35.0)"] +iotsecuretunneling = ["mypy-boto3-iotsecuretunneling (>=1.34.0,<1.35.0)"] +iotsitewise = ["mypy-boto3-iotsitewise (>=1.34.0,<1.35.0)"] +iotthingsgraph = ["mypy-boto3-iotthingsgraph (>=1.34.0,<1.35.0)"] +iottwinmaker = ["mypy-boto3-iottwinmaker (>=1.34.0,<1.35.0)"] +iotwireless = ["mypy-boto3-iotwireless (>=1.34.0,<1.35.0)"] +ivs = ["mypy-boto3-ivs (>=1.34.0,<1.35.0)"] +ivs-realtime = ["mypy-boto3-ivs-realtime (>=1.34.0,<1.35.0)"] +ivschat = ["mypy-boto3-ivschat (>=1.34.0,<1.35.0)"] +kafka = ["mypy-boto3-kafka (>=1.34.0,<1.35.0)"] +kafkaconnect = ["mypy-boto3-kafkaconnect (>=1.34.0,<1.35.0)"] +kendra = ["mypy-boto3-kendra (>=1.34.0,<1.35.0)"] +kendra-ranking = ["mypy-boto3-kendra-ranking (>=1.34.0,<1.35.0)"] +keyspaces = ["mypy-boto3-keyspaces (>=1.34.0,<1.35.0)"] +kinesis = ["mypy-boto3-kinesis (>=1.34.0,<1.35.0)"] +kinesis-video-archived-media = ["mypy-boto3-kinesis-video-archived-media (>=1.34.0,<1.35.0)"] +kinesis-video-media = ["mypy-boto3-kinesis-video-media 
(>=1.34.0,<1.35.0)"] +kinesis-video-signaling = ["mypy-boto3-kinesis-video-signaling (>=1.34.0,<1.35.0)"] +kinesis-video-webrtc-storage = ["mypy-boto3-kinesis-video-webrtc-storage (>=1.34.0,<1.35.0)"] +kinesisanalytics = ["mypy-boto3-kinesisanalytics (>=1.34.0,<1.35.0)"] +kinesisanalyticsv2 = ["mypy-boto3-kinesisanalyticsv2 (>=1.34.0,<1.35.0)"] +kinesisvideo = ["mypy-boto3-kinesisvideo (>=1.34.0,<1.35.0)"] +kms = ["mypy-boto3-kms (>=1.34.0,<1.35.0)"] +lakeformation = ["mypy-boto3-lakeformation (>=1.34.0,<1.35.0)"] +lambda = ["mypy-boto3-lambda (>=1.34.0,<1.35.0)"] +launch-wizard = ["mypy-boto3-launch-wizard (>=1.34.0,<1.35.0)"] +lex-models = ["mypy-boto3-lex-models (>=1.34.0,<1.35.0)"] +lex-runtime = ["mypy-boto3-lex-runtime (>=1.34.0,<1.35.0)"] +lexv2-models = ["mypy-boto3-lexv2-models (>=1.34.0,<1.35.0)"] +lexv2-runtime = ["mypy-boto3-lexv2-runtime (>=1.34.0,<1.35.0)"] +license-manager = ["mypy-boto3-license-manager (>=1.34.0,<1.35.0)"] +license-manager-linux-subscriptions = ["mypy-boto3-license-manager-linux-subscriptions (>=1.34.0,<1.35.0)"] +license-manager-user-subscriptions = ["mypy-boto3-license-manager-user-subscriptions (>=1.34.0,<1.35.0)"] +lightsail = ["mypy-boto3-lightsail (>=1.34.0,<1.35.0)"] +location = ["mypy-boto3-location (>=1.34.0,<1.35.0)"] +logs = ["mypy-boto3-logs (>=1.34.0,<1.35.0)"] +lookoutequipment = ["mypy-boto3-lookoutequipment (>=1.34.0,<1.35.0)"] +lookoutmetrics = ["mypy-boto3-lookoutmetrics (>=1.34.0,<1.35.0)"] +lookoutvision = ["mypy-boto3-lookoutvision (>=1.34.0,<1.35.0)"] +m2 = ["mypy-boto3-m2 (>=1.34.0,<1.35.0)"] +machinelearning = ["mypy-boto3-machinelearning (>=1.34.0,<1.35.0)"] +macie2 = ["mypy-boto3-macie2 (>=1.34.0,<1.35.0)"] +managedblockchain = ["mypy-boto3-managedblockchain (>=1.34.0,<1.35.0)"] +managedblockchain-query = ["mypy-boto3-managedblockchain-query (>=1.34.0,<1.35.0)"] +marketplace-agreement = ["mypy-boto3-marketplace-agreement (>=1.34.0,<1.35.0)"] +marketplace-catalog = ["mypy-boto3-marketplace-catalog 
(>=1.34.0,<1.35.0)"] +marketplace-deployment = ["mypy-boto3-marketplace-deployment (>=1.34.0,<1.35.0)"] +marketplace-entitlement = ["mypy-boto3-marketplace-entitlement (>=1.34.0,<1.35.0)"] +marketplacecommerceanalytics = ["mypy-boto3-marketplacecommerceanalytics (>=1.34.0,<1.35.0)"] +mediaconnect = ["mypy-boto3-mediaconnect (>=1.34.0,<1.35.0)"] +mediaconvert = ["mypy-boto3-mediaconvert (>=1.34.0,<1.35.0)"] +medialive = ["mypy-boto3-medialive (>=1.34.0,<1.35.0)"] +mediapackage = ["mypy-boto3-mediapackage (>=1.34.0,<1.35.0)"] +mediapackage-vod = ["mypy-boto3-mediapackage-vod (>=1.34.0,<1.35.0)"] +mediapackagev2 = ["mypy-boto3-mediapackagev2 (>=1.34.0,<1.35.0)"] +mediastore = ["mypy-boto3-mediastore (>=1.34.0,<1.35.0)"] +mediastore-data = ["mypy-boto3-mediastore-data (>=1.34.0,<1.35.0)"] +mediatailor = ["mypy-boto3-mediatailor (>=1.34.0,<1.35.0)"] +medical-imaging = ["mypy-boto3-medical-imaging (>=1.34.0,<1.35.0)"] +memorydb = ["mypy-boto3-memorydb (>=1.34.0,<1.35.0)"] +meteringmarketplace = ["mypy-boto3-meteringmarketplace (>=1.34.0,<1.35.0)"] +mgh = ["mypy-boto3-mgh (>=1.34.0,<1.35.0)"] +mgn = ["mypy-boto3-mgn (>=1.34.0,<1.35.0)"] +migration-hub-refactor-spaces = ["mypy-boto3-migration-hub-refactor-spaces (>=1.34.0,<1.35.0)"] +migrationhub-config = ["mypy-boto3-migrationhub-config (>=1.34.0,<1.35.0)"] +migrationhuborchestrator = ["mypy-boto3-migrationhuborchestrator (>=1.34.0,<1.35.0)"] +migrationhubstrategy = ["mypy-boto3-migrationhubstrategy (>=1.34.0,<1.35.0)"] +mobile = ["mypy-boto3-mobile (>=1.34.0,<1.35.0)"] +mq = ["mypy-boto3-mq (>=1.34.0,<1.35.0)"] +mturk = ["mypy-boto3-mturk (>=1.34.0,<1.35.0)"] +mwaa = ["mypy-boto3-mwaa (>=1.34.0,<1.35.0)"] +neptune = ["mypy-boto3-neptune (>=1.34.0,<1.35.0)"] +neptune-graph = ["mypy-boto3-neptune-graph (>=1.34.0,<1.35.0)"] +neptunedata = ["mypy-boto3-neptunedata (>=1.34.0,<1.35.0)"] +network-firewall = ["mypy-boto3-network-firewall (>=1.34.0,<1.35.0)"] +networkmanager = ["mypy-boto3-networkmanager (>=1.34.0,<1.35.0)"] 
+networkmonitor = ["mypy-boto3-networkmonitor (>=1.34.0,<1.35.0)"] +nimble = ["mypy-boto3-nimble (>=1.34.0,<1.35.0)"] +oam = ["mypy-boto3-oam (>=1.34.0,<1.35.0)"] +omics = ["mypy-boto3-omics (>=1.34.0,<1.35.0)"] +opensearch = ["mypy-boto3-opensearch (>=1.34.0,<1.35.0)"] +opensearchserverless = ["mypy-boto3-opensearchserverless (>=1.34.0,<1.35.0)"] +opsworks = ["mypy-boto3-opsworks (>=1.34.0,<1.35.0)"] +opsworkscm = ["mypy-boto3-opsworkscm (>=1.34.0,<1.35.0)"] +organizations = ["mypy-boto3-organizations (>=1.34.0,<1.35.0)"] +osis = ["mypy-boto3-osis (>=1.34.0,<1.35.0)"] +outposts = ["mypy-boto3-outposts (>=1.34.0,<1.35.0)"] +panorama = ["mypy-boto3-panorama (>=1.34.0,<1.35.0)"] +payment-cryptography = ["mypy-boto3-payment-cryptography (>=1.34.0,<1.35.0)"] +payment-cryptography-data = ["mypy-boto3-payment-cryptography-data (>=1.34.0,<1.35.0)"] +pca-connector-ad = ["mypy-boto3-pca-connector-ad (>=1.34.0,<1.35.0)"] +personalize = ["mypy-boto3-personalize (>=1.34.0,<1.35.0)"] +personalize-events = ["mypy-boto3-personalize-events (>=1.34.0,<1.35.0)"] +personalize-runtime = ["mypy-boto3-personalize-runtime (>=1.34.0,<1.35.0)"] +pi = ["mypy-boto3-pi (>=1.34.0,<1.35.0)"] +pinpoint = ["mypy-boto3-pinpoint (>=1.34.0,<1.35.0)"] +pinpoint-email = ["mypy-boto3-pinpoint-email (>=1.34.0,<1.35.0)"] +pinpoint-sms-voice = ["mypy-boto3-pinpoint-sms-voice (>=1.34.0,<1.35.0)"] +pinpoint-sms-voice-v2 = ["mypy-boto3-pinpoint-sms-voice-v2 (>=1.34.0,<1.35.0)"] +pipes = ["mypy-boto3-pipes (>=1.34.0,<1.35.0)"] +polly = ["mypy-boto3-polly (>=1.34.0,<1.35.0)"] +pricing = ["mypy-boto3-pricing (>=1.34.0,<1.35.0)"] +privatenetworks = ["mypy-boto3-privatenetworks (>=1.34.0,<1.35.0)"] +proton = ["mypy-boto3-proton (>=1.34.0,<1.35.0)"] +qbusiness = ["mypy-boto3-qbusiness (>=1.34.0,<1.35.0)"] +qconnect = ["mypy-boto3-qconnect (>=1.34.0,<1.35.0)"] +qldb = ["mypy-boto3-qldb (>=1.34.0,<1.35.0)"] +qldb-session = ["mypy-boto3-qldb-session (>=1.34.0,<1.35.0)"] +quicksight = ["mypy-boto3-quicksight 
(>=1.34.0,<1.35.0)"] +ram = ["mypy-boto3-ram (>=1.34.0,<1.35.0)"] +rbin = ["mypy-boto3-rbin (>=1.34.0,<1.35.0)"] +rds = ["mypy-boto3-rds (>=1.34.0,<1.35.0)"] +rds-data = ["mypy-boto3-rds-data (>=1.34.0,<1.35.0)"] +redshift = ["mypy-boto3-redshift (>=1.34.0,<1.35.0)"] +redshift-data = ["mypy-boto3-redshift-data (>=1.34.0,<1.35.0)"] +redshift-serverless = ["mypy-boto3-redshift-serverless (>=1.34.0,<1.35.0)"] +rekognition = ["mypy-boto3-rekognition (>=1.34.0,<1.35.0)"] +repostspace = ["mypy-boto3-repostspace (>=1.34.0,<1.35.0)"] +resiliencehub = ["mypy-boto3-resiliencehub (>=1.34.0,<1.35.0)"] +resource-explorer-2 = ["mypy-boto3-resource-explorer-2 (>=1.34.0,<1.35.0)"] +resource-groups = ["mypy-boto3-resource-groups (>=1.34.0,<1.35.0)"] +resourcegroupstaggingapi = ["mypy-boto3-resourcegroupstaggingapi (>=1.34.0,<1.35.0)"] +robomaker = ["mypy-boto3-robomaker (>=1.34.0,<1.35.0)"] +rolesanywhere = ["mypy-boto3-rolesanywhere (>=1.34.0,<1.35.0)"] +route53 = ["mypy-boto3-route53 (>=1.34.0,<1.35.0)"] +route53-recovery-cluster = ["mypy-boto3-route53-recovery-cluster (>=1.34.0,<1.35.0)"] +route53-recovery-control-config = ["mypy-boto3-route53-recovery-control-config (>=1.34.0,<1.35.0)"] +route53-recovery-readiness = ["mypy-boto3-route53-recovery-readiness (>=1.34.0,<1.35.0)"] +route53domains = ["mypy-boto3-route53domains (>=1.34.0,<1.35.0)"] +route53resolver = ["mypy-boto3-route53resolver (>=1.34.0,<1.35.0)"] +rum = ["mypy-boto3-rum (>=1.34.0,<1.35.0)"] +s3 = ["mypy-boto3-s3 (>=1.34.0,<1.35.0)"] +s3control = ["mypy-boto3-s3control (>=1.34.0,<1.35.0)"] +s3outposts = ["mypy-boto3-s3outposts (>=1.34.0,<1.35.0)"] +sagemaker = ["mypy-boto3-sagemaker (>=1.34.0,<1.35.0)"] +sagemaker-a2i-runtime = ["mypy-boto3-sagemaker-a2i-runtime (>=1.34.0,<1.35.0)"] +sagemaker-edge = ["mypy-boto3-sagemaker-edge (>=1.34.0,<1.35.0)"] +sagemaker-featurestore-runtime = ["mypy-boto3-sagemaker-featurestore-runtime (>=1.34.0,<1.35.0)"] +sagemaker-geospatial = ["mypy-boto3-sagemaker-geospatial 
(>=1.34.0,<1.35.0)"] +sagemaker-metrics = ["mypy-boto3-sagemaker-metrics (>=1.34.0,<1.35.0)"] +sagemaker-runtime = ["mypy-boto3-sagemaker-runtime (>=1.34.0,<1.35.0)"] +savingsplans = ["mypy-boto3-savingsplans (>=1.34.0,<1.35.0)"] +scheduler = ["mypy-boto3-scheduler (>=1.34.0,<1.35.0)"] +schemas = ["mypy-boto3-schemas (>=1.34.0,<1.35.0)"] +sdb = ["mypy-boto3-sdb (>=1.34.0,<1.35.0)"] +secretsmanager = ["mypy-boto3-secretsmanager (>=1.34.0,<1.35.0)"] +securityhub = ["mypy-boto3-securityhub (>=1.34.0,<1.35.0)"] +securitylake = ["mypy-boto3-securitylake (>=1.34.0,<1.35.0)"] +serverlessrepo = ["mypy-boto3-serverlessrepo (>=1.34.0,<1.35.0)"] +service-quotas = ["mypy-boto3-service-quotas (>=1.34.0,<1.35.0)"] +servicecatalog = ["mypy-boto3-servicecatalog (>=1.34.0,<1.35.0)"] +servicecatalog-appregistry = ["mypy-boto3-servicecatalog-appregistry (>=1.34.0,<1.35.0)"] +servicediscovery = ["mypy-boto3-servicediscovery (>=1.34.0,<1.35.0)"] +ses = ["mypy-boto3-ses (>=1.34.0,<1.35.0)"] +sesv2 = ["mypy-boto3-sesv2 (>=1.34.0,<1.35.0)"] +shield = ["mypy-boto3-shield (>=1.34.0,<1.35.0)"] +signer = ["mypy-boto3-signer (>=1.34.0,<1.35.0)"] +simspaceweaver = ["mypy-boto3-simspaceweaver (>=1.34.0,<1.35.0)"] +sms = ["mypy-boto3-sms (>=1.34.0,<1.35.0)"] +sms-voice = ["mypy-boto3-sms-voice (>=1.34.0,<1.35.0)"] +snow-device-management = ["mypy-boto3-snow-device-management (>=1.34.0,<1.35.0)"] +snowball = ["mypy-boto3-snowball (>=1.34.0,<1.35.0)"] +sns = ["mypy-boto3-sns (>=1.34.0,<1.35.0)"] +sqs = ["mypy-boto3-sqs (>=1.34.0,<1.35.0)"] +ssm = ["mypy-boto3-ssm (>=1.34.0,<1.35.0)"] +ssm-contacts = ["mypy-boto3-ssm-contacts (>=1.34.0,<1.35.0)"] +ssm-incidents = ["mypy-boto3-ssm-incidents (>=1.34.0,<1.35.0)"] +ssm-sap = ["mypy-boto3-ssm-sap (>=1.34.0,<1.35.0)"] +sso = ["mypy-boto3-sso (>=1.34.0,<1.35.0)"] +sso-admin = ["mypy-boto3-sso-admin (>=1.34.0,<1.35.0)"] +sso-oidc = ["mypy-boto3-sso-oidc (>=1.34.0,<1.35.0)"] +stepfunctions = ["mypy-boto3-stepfunctions (>=1.34.0,<1.35.0)"] +storagegateway = 
["mypy-boto3-storagegateway (>=1.34.0,<1.35.0)"] +sts = ["mypy-boto3-sts (>=1.34.0,<1.35.0)"] +supplychain = ["mypy-boto3-supplychain (>=1.34.0,<1.35.0)"] +support = ["mypy-boto3-support (>=1.34.0,<1.35.0)"] +support-app = ["mypy-boto3-support-app (>=1.34.0,<1.35.0)"] +swf = ["mypy-boto3-swf (>=1.34.0,<1.35.0)"] +synthetics = ["mypy-boto3-synthetics (>=1.34.0,<1.35.0)"] +textract = ["mypy-boto3-textract (>=1.34.0,<1.35.0)"] +timestream-query = ["mypy-boto3-timestream-query (>=1.34.0,<1.35.0)"] +timestream-write = ["mypy-boto3-timestream-write (>=1.34.0,<1.35.0)"] +tnb = ["mypy-boto3-tnb (>=1.34.0,<1.35.0)"] +transcribe = ["mypy-boto3-transcribe (>=1.34.0,<1.35.0)"] +transfer = ["mypy-boto3-transfer (>=1.34.0,<1.35.0)"] +translate = ["mypy-boto3-translate (>=1.34.0,<1.35.0)"] +trustedadvisor = ["mypy-boto3-trustedadvisor (>=1.34.0,<1.35.0)"] +verifiedpermissions = ["mypy-boto3-verifiedpermissions (>=1.34.0,<1.35.0)"] +voice-id = ["mypy-boto3-voice-id (>=1.34.0,<1.35.0)"] +vpc-lattice = ["mypy-boto3-vpc-lattice (>=1.34.0,<1.35.0)"] +waf = ["mypy-boto3-waf (>=1.34.0,<1.35.0)"] +waf-regional = ["mypy-boto3-waf-regional (>=1.34.0,<1.35.0)"] +wafv2 = ["mypy-boto3-wafv2 (>=1.34.0,<1.35.0)"] +wellarchitected = ["mypy-boto3-wellarchitected (>=1.34.0,<1.35.0)"] +wisdom = ["mypy-boto3-wisdom (>=1.34.0,<1.35.0)"] +workdocs = ["mypy-boto3-workdocs (>=1.34.0,<1.35.0)"] +worklink = ["mypy-boto3-worklink (>=1.34.0,<1.35.0)"] +workmail = ["mypy-boto3-workmail (>=1.34.0,<1.35.0)"] +workmailmessageflow = ["mypy-boto3-workmailmessageflow (>=1.34.0,<1.35.0)"] +workspaces = ["mypy-boto3-workspaces (>=1.34.0,<1.35.0)"] +workspaces-thin-client = ["mypy-boto3-workspaces-thin-client (>=1.34.0,<1.35.0)"] +workspaces-web = ["mypy-boto3-workspaces-web (>=1.34.0,<1.35.0)"] +xray = ["mypy-boto3-xray (>=1.34.0,<1.35.0)"] + +[[package]] +name = "botocore" +version = "1.34.20" +description = "Low-level, data-driven core of boto 3." 
+optional = false +python-versions = ">= 3.8" +files = [ + {file = "botocore-1.34.20-py3-none-any.whl", hash = "sha256:f931f13d03e94b3350ad898b21ae2d40240f6571e8a8cdaa487951b51fe3a1fd"}, + {file = "botocore-1.34.20.tar.gz", hash = "sha256:e944bc085222a13359933f4c0a1cce228bdd8aa90e1f2274e94bd55f561db307"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = {version = ">=1.25.4,<2.1", markers = "python_version >= \"3.10\""} + +[package.extras] +crt = ["awscrt (==0.19.19)"] + +[[package]] +name = "botocore-stubs" +version = "1.34.20" +description = "Type annotations and code completion for botocore" +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "botocore_stubs-1.34.20-py3-none-any.whl", hash = "sha256:45296ac7942578eb6baca49589115e9bfe3a803bfd3160fa42426381084f41cb"}, + {file = "botocore_stubs-1.34.20.tar.gz", hash = "sha256:e85c2716858cbed5b935133fd681537b64bb991ad4fb43d55295edfffaf494e9"}, +] + +[package.dependencies] +types-awscrt = "*" + +[package.extras] +botocore = ["botocore"] + +[[package]] +name = "bs4" +version = "0.0.1" +description = "Dummy package for Beautiful Soup" +optional = false +python-versions = "*" +files = [ + {file = "bs4-0.0.1.tar.gz", hash = "sha256:36ecea1fd7cc5c0c6e4a1ff075df26d50da647b75376626cc186e2212886dd3a"}, +] + +[package.dependencies] +beautifulsoup4 = "*" + +[[package]] +name = "build" +version = "1.0.3" +description = "A simple, correct Python build frontend" +optional = false +python-versions = ">= 3.7" +files = [ + {file = "build-1.0.3-py3-none-any.whl", hash = "sha256:589bf99a67df7c9cf07ec0ac0e5e2ea5d4b37ac63301c4986d1acb126aa83f8f"}, + {file = "build-1.0.3.tar.gz", hash = "sha256:538aab1b64f9828977f84bc63ae570b060a8ed1be419e7870b8b4fc5e6ea553b"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "os_name == \"nt\""} +packaging = ">=19.0" +pyproject_hooks = "*" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} + 
+[package.extras] +docs = ["furo (>=2023.08.17)", "sphinx (>=7.0,<8.0)", "sphinx-argparse-cli (>=1.5)", "sphinx-autodoc-typehints (>=1.10)", "sphinx-issues (>=3.0.0)"] +test = ["filelock (>=3)", "pytest (>=6.2.4)", "pytest-cov (>=2.12)", "pytest-mock (>=2)", "pytest-rerunfailures (>=9.1)", "pytest-xdist (>=1.34)", "setuptools (>=42.0.0)", "setuptools (>=56.0.0)", "setuptools (>=56.0.0)", "setuptools (>=67.8.0)", "wheel (>=0.36.0)"] +typing = ["importlib-metadata (>=5.1)", "mypy (>=1.5.0,<1.6.0)", "tomli", "typing-extensions (>=3.7.4.3)"] +virtualenv = ["virtualenv (>=20.0.35)"] + +[[package]] +name = "cachetools" +version = "5.3.2" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.3.2-py3-none-any.whl", hash = "sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1"}, + {file = "cachetools-5.3.2.tar.gz", hash = "sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2"}, +] + +[[package]] +name = "catalogue" +version = "2.0.10" +description = "Super lightweight function registries for your library" +optional = false +python-versions = ">=3.6" +files = [ + {file = "catalogue-2.0.10-py3-none-any.whl", hash = "sha256:58c2de0020aa90f4a2da7dfad161bf7b3b054c86a5f09fcedc0b2b740c109a9f"}, + {file = "catalogue-2.0.10.tar.gz", hash = "sha256:4f56daa940913d3f09d589c191c74e5a6d51762b3a9e37dd53b7437afd6cda15"}, +] + +[[package]] +name = "certifi" +version = "2023.11.17" +description = "Python package for providing Mozilla's CA Bundle." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, + {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, +] + +[[package]] +name = "cffi" +version = "1.16.0" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, + {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, + {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = 
"sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, + {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, + {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, + {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, + {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, + {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, + {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, + {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, + {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, + {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, + {file = 
"cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, + {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, + {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, + {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "cfgv" +version = "3.4.0" +description = "Validate configuration and produce human readable error messages." +optional = false +python-versions = ">=3.8" +files = [ + {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, + {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, +] + +[[package]] +name = "chardet" +version = "5.2.0" +description = "Universal encoding detector for Python 3" +optional = false +python-versions = ">=3.7" +files = [ + {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, + {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = 
"charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = 
"charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "chroma-hnswlib" +version = "0.7.3" +description = "Chromas fork of hnswlib" +optional = false +python-versions = "*" +files = [ + {file = "chroma-hnswlib-0.7.3.tar.gz", hash = "sha256:b6137bedde49fffda6af93b0297fe00429fc61e5a072b1ed9377f909ed95a932"}, + {file = "chroma_hnswlib-0.7.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59d6a7c6f863c67aeb23e79a64001d537060b6995c3eca9a06e349ff7b0998ca"}, + {file = "chroma_hnswlib-0.7.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d71a3f4f232f537b6152947006bd32bc1629a8686df22fd97777b70f416c127a"}, + {file = "chroma_hnswlib-0.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c92dc1ebe062188e53970ba13f6b07e0ae32e64c9770eb7f7ffa83f149d4210"}, + {file = "chroma_hnswlib-0.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:49da700a6656fed8753f68d44b8cc8ae46efc99fc8a22a6d970dc1697f49b403"}, + {file = "chroma_hnswlib-0.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:108bc4c293d819b56476d8f7865803cb03afd6ca128a2a04d678fffc139af029"}, + {file = "chroma_hnswlib-0.7.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:11e7ca93fb8192214ac2b9c0943641ac0daf8f9d4591bb7b73be808a83835667"}, + {file = "chroma_hnswlib-0.7.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6f552e4d23edc06cdeb553cdc757d2fe190cdeb10d43093d6a3319f8d4bf1c6b"}, + {file = "chroma_hnswlib-0.7.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f96f4d5699e486eb1fb95849fe35ab79ab0901265805be7e60f4eaa83ce263ec"}, + {file = "chroma_hnswlib-0.7.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:368e57fe9ebae05ee5844840fa588028a023d1182b0cfdb1d13f607c9ea05756"}, + {file = "chroma_hnswlib-0.7.3-cp311-cp311-win_amd64.whl", hash = "sha256:b7dca27b8896b494456db0fd705b689ac6b73af78e186eb6a42fea2de4f71c6f"}, + {file = "chroma_hnswlib-0.7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:70f897dc6218afa1d99f43a9ad5eb82f392df31f57ff514ccf4eeadecd62f544"}, + {file = "chroma_hnswlib-0.7.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5aef10b4952708f5a1381c124a29aead0c356f8d7d6e0b520b778aaa62a356f4"}, + {file = "chroma_hnswlib-0.7.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ee2d8d1529fca3898d512079144ec3e28a81d9c17e15e0ea4665697a7923253"}, + {file = "chroma_hnswlib-0.7.3-cp37-cp37m-win_amd64.whl", hash = "sha256:a4021a70e898783cd6f26e00008b494c6249a7babe8774e90ce4766dd288c8ba"}, + {file = "chroma_hnswlib-0.7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a8f61fa1d417fda848e3ba06c07671f14806a2585272b175ba47501b066fe6b1"}, + {file = "chroma_hnswlib-0.7.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d7563be58bc98e8f0866907368e22ae218d6060601b79c42f59af4eccbbd2e0a"}, + {file = 
"chroma_hnswlib-0.7.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51b8d411486ee70d7b66ec08cc8b9b6620116b650df9c19076d2d8b6ce2ae914"}, + {file = "chroma_hnswlib-0.7.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d706782b628e4f43f1b8a81e9120ac486837fbd9bcb8ced70fe0d9b95c72d77"}, + {file = "chroma_hnswlib-0.7.3-cp38-cp38-win_amd64.whl", hash = "sha256:54f053dedc0e3ba657f05fec6e73dd541bc5db5b09aa8bc146466ffb734bdc86"}, + {file = "chroma_hnswlib-0.7.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e607c5a71c610a73167a517062d302c0827ccdd6e259af6e4869a5c1306ffb5d"}, + {file = "chroma_hnswlib-0.7.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2358a795870156af6761890f9eb5ca8cade57eb10c5f046fe94dae1faa04b9e"}, + {file = "chroma_hnswlib-0.7.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cea425df2e6b8a5e201fff0d922a1cc1d165b3cfe762b1408075723c8892218"}, + {file = "chroma_hnswlib-0.7.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:454df3dd3e97aa784fba7cf888ad191e0087eef0fd8c70daf28b753b3b591170"}, + {file = "chroma_hnswlib-0.7.3-cp39-cp39-win_amd64.whl", hash = "sha256:df587d15007ca701c6de0ee7d5585dd5e976b7edd2b30ac72bc376b3c3f85882"}, +] + +[package.dependencies] +numpy = "*" + +[[package]] +name = "chromadb" +version = "0.4.22" +description = "Chroma." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "chromadb-0.4.22-py3-none-any.whl", hash = "sha256:ad210b27b4cda2f09d15adc9c83c81bfa66b69f39648a27b637306e40de0680d"}, + {file = "chromadb-0.4.22.tar.gz", hash = "sha256:c793149e1c2bbbb52d77602c6c0594c5752f04cd9be12619250ddad2082af27a"}, +] + +[package.dependencies] +bcrypt = ">=4.0.1" +build = ">=1.0.3" +chroma-hnswlib = "0.7.3" +fastapi = ">=0.95.2" +grpcio = ">=1.58.0" +importlib-resources = "*" +kubernetes = ">=28.1.0" +mmh3 = ">=4.0.1" +numpy = ">=1.22.5" +onnxruntime = ">=1.14.1" +opentelemetry-api = ">=1.2.0" +opentelemetry-exporter-otlp-proto-grpc = ">=1.2.0" +opentelemetry-instrumentation-fastapi = ">=0.41b0" +opentelemetry-sdk = ">=1.2.0" +overrides = ">=7.3.1" +posthog = ">=2.4.0" +pulsar-client = ">=3.1.0" +pydantic = ">=1.9" +pypika = ">=0.48.9" +PyYAML = ">=6.0.0" +requests = ">=2.28" +tenacity = ">=8.2.3" +tokenizers = ">=0.13.2" +tqdm = ">=4.65.0" +typer = ">=0.9.0" +typing-extensions = ">=4.5.0" +uvicorn = {version = ">=0.18.3", extras = ["standard"]} + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "click-default-group" +version = "1.2.4" +description = "click_default_group" +optional = true +python-versions = ">=2.7" +files = [ + {file = "click_default_group-1.2.4-py2.py3-none-any.whl", hash = "sha256:9b60486923720e7fc61731bdb32b617039aba820e22e1c88766b1125592eaa5f"}, + {file = "click_default_group-1.2.4.tar.gz", hash = "sha256:eb3f3c99ec0d456ca6cd2a7f08f7d4e91771bef51b01bdd9580cc6450fe1251e"}, +] + 
+[package.dependencies] +click = "*" + +[package.extras] +test = ["pytest"] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "coloredlogs" +version = "15.0.1" +description = "Colored terminal output for Python's logging module" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, + {file = "coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0"}, +] + +[package.dependencies] +humanfriendly = ">=9.1" + +[package.extras] +cron = ["capturer (>=2.4)"] + +[[package]] +name = "colorlog" +version = "6.8.0" +description = "Add colours to the output of Python's logging module." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "colorlog-6.8.0-py3-none-any.whl", hash = "sha256:4ed23b05a1154294ac99f511fabe8c1d6d4364ec1f7fc989c7fb515ccc29d375"}, + {file = "colorlog-6.8.0.tar.gz", hash = "sha256:fbb6fdf9d5685f2517f388fb29bb27d54e8654dd31f58bc2a3b217e967a95ca6"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} + +[package.extras] +development = ["black", "flake8", "mypy", "pytest", "types-colorama"] + +[[package]] +name = "confection" +version = "0.1.4" +description = "The sweetest config system for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "confection-0.1.4-py3-none-any.whl", hash = "sha256:a658818d004939069c3e2b3db74a2cb9d956a5e61a1c9ad61788e0ee09a7090f"}, + {file = "confection-0.1.4.tar.gz", hash = "sha256:e80f22fd008b5231a2e8852fac6de9e28f2276a04031d0536cff74fe4a990c8f"}, +] + +[package.dependencies] +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<3.0.0" +srsly = ">=2.4.0,<3.0.0" + +[[package]] +name = "contourpy" +version = "1.2.0" +description = "Python library for calculating contours of 2D quadrilateral grids" +optional = true +python-versions = ">=3.9" +files = [ + {file = "contourpy-1.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0274c1cb63625972c0c007ab14dd9ba9e199c36ae1a231ce45d725cbcbfd10a8"}, + {file = "contourpy-1.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ab459a1cbbf18e8698399c595a01f6dcc5c138220ca3ea9e7e6126232d102bb4"}, + {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fdd887f17c2f4572ce548461e4f96396681212d858cae7bd52ba3310bc6f00f"}, + {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d16edfc3fc09968e09ddffada434b3bf989bf4911535e04eada58469873e28e"}, + {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:1c203f617abc0dde5792beb586f827021069fb6d403d7f4d5c2b543d87edceb9"}, + {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b69303ceb2e4d4f146bf82fda78891ef7bcd80c41bf16bfca3d0d7eb545448aa"}, + {file = "contourpy-1.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:884c3f9d42d7218304bc74a8a7693d172685c84bd7ab2bab1ee567b769696df9"}, + {file = "contourpy-1.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4a1b1208102be6e851f20066bf0e7a96b7d48a07c9b0cfe6d0d4545c2f6cadab"}, + {file = "contourpy-1.2.0-cp310-cp310-win32.whl", hash = "sha256:34b9071c040d6fe45d9826cbbe3727d20d83f1b6110d219b83eb0e2a01d79488"}, + {file = "contourpy-1.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:bd2f1ae63998da104f16a8b788f685e55d65760cd1929518fd94cd682bf03e41"}, + {file = "contourpy-1.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dd10c26b4eadae44783c45ad6655220426f971c61d9b239e6f7b16d5cdaaa727"}, + {file = "contourpy-1.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5c6b28956b7b232ae801406e529ad7b350d3f09a4fde958dfdf3c0520cdde0dd"}, + {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebeac59e9e1eb4b84940d076d9f9a6cec0064e241818bcb6e32124cc5c3e377a"}, + {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:139d8d2e1c1dd52d78682f505e980f592ba53c9f73bd6be102233e358b401063"}, + {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e9dc350fb4c58adc64df3e0703ab076f60aac06e67d48b3848c23647ae4310e"}, + {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18fc2b4ed8e4a8fe849d18dce4bd3c7ea637758c6343a1f2bae1e9bd4c9f4686"}, + {file = "contourpy-1.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:16a7380e943a6d52472096cb7ad5264ecee36ed60888e2a3d3814991a0107286"}, + {file = 
"contourpy-1.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8d8faf05be5ec8e02a4d86f616fc2a0322ff4a4ce26c0f09d9f7fb5330a35c95"}, + {file = "contourpy-1.2.0-cp311-cp311-win32.whl", hash = "sha256:67b7f17679fa62ec82b7e3e611c43a016b887bd64fb933b3ae8638583006c6d6"}, + {file = "contourpy-1.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:99ad97258985328b4f207a5e777c1b44a83bfe7cf1f87b99f9c11d4ee477c4de"}, + {file = "contourpy-1.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:575bcaf957a25d1194903a10bc9f316c136c19f24e0985a2b9b5608bdf5dbfe0"}, + {file = "contourpy-1.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9e6c93b5b2dbcedad20a2f18ec22cae47da0d705d454308063421a3b290d9ea4"}, + {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:464b423bc2a009088f19bdf1f232299e8b6917963e2b7e1d277da5041f33a779"}, + {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:68ce4788b7d93e47f84edd3f1f95acdcd142ae60bc0e5493bfd120683d2d4316"}, + {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d7d1f8871998cdff5d2ff6a087e5e1780139abe2838e85b0b46b7ae6cc25399"}, + {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e739530c662a8d6d42c37c2ed52a6f0932c2d4a3e8c1f90692ad0ce1274abe0"}, + {file = "contourpy-1.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:247b9d16535acaa766d03037d8e8fb20866d054d3c7fbf6fd1f993f11fc60ca0"}, + {file = "contourpy-1.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:461e3ae84cd90b30f8d533f07d87c00379644205b1d33a5ea03381edc4b69431"}, + {file = "contourpy-1.2.0-cp312-cp312-win32.whl", hash = "sha256:1c2559d6cffc94890b0529ea7eeecc20d6fadc1539273aa27faf503eb4656d8f"}, + {file = "contourpy-1.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:491b1917afdd8638a05b611a56d46587d5a632cabead889a5440f7c638bc6ed9"}, + {file = 
"contourpy-1.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5fd1810973a375ca0e097dee059c407913ba35723b111df75671a1976efa04bc"}, + {file = "contourpy-1.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:999c71939aad2780f003979b25ac5b8f2df651dac7b38fb8ce6c46ba5abe6ae9"}, + {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7caf9b241464c404613512d5594a6e2ff0cc9cb5615c9475cc1d9b514218ae8"}, + {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:266270c6f6608340f6c9836a0fb9b367be61dde0c9a9a18d5ece97774105ff3e"}, + {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbd50d0a0539ae2e96e537553aff6d02c10ed165ef40c65b0e27e744a0f10af8"}, + {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11f8d2554e52f459918f7b8e6aa20ec2a3bce35ce95c1f0ef4ba36fbda306df5"}, + {file = "contourpy-1.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ce96dd400486e80ac7d195b2d800b03e3e6a787e2a522bfb83755938465a819e"}, + {file = "contourpy-1.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6d3364b999c62f539cd403f8123ae426da946e142312a514162adb2addd8d808"}, + {file = "contourpy-1.2.0-cp39-cp39-win32.whl", hash = "sha256:1c88dfb9e0c77612febebb6ac69d44a8d81e3dc60f993215425b62c1161353f4"}, + {file = "contourpy-1.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:78e6ad33cf2e2e80c5dfaaa0beec3d61face0fb650557100ee36db808bfa6843"}, + {file = "contourpy-1.2.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:be16975d94c320432657ad2402f6760990cb640c161ae6da1363051805fa8108"}, + {file = "contourpy-1.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b95a225d4948b26a28c08307a60ac00fb8671b14f2047fc5476613252a129776"}, + {file = "contourpy-1.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0d7e03c0f9a4f90dc18d4e77e9ef4ec7b7bbb437f7f675be8e530d65ae6ef956"}, + {file 
= "contourpy-1.2.0.tar.gz", hash = "sha256:171f311cb758de7da13fc53af221ae47a5877be5a0843a9fe150818c51ed276a"}, +] + +[package.dependencies] +numpy = ">=1.20,<2.0" + +[package.extras] +bokeh = ["bokeh", "selenium"] +docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] +mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.6.1)", "types-Pillow"] +test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] +test-no-images = ["pytest", "pytest-cov", "pytest-xdist", "wurlitzer"] + +[[package]] +name = "coverage" +version = "7.4.0" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "coverage-7.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36b0ea8ab20d6a7564e89cb6135920bc9188fb5f1f7152e94e8300b7b189441a"}, + {file = "coverage-7.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0676cd0ba581e514b7f726495ea75aba3eb20899d824636c6f59b0ed2f88c471"}, + {file = "coverage-7.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ca5c71a5a1765a0f8f88022c52b6b8be740e512980362f7fdbb03725a0d6b9"}, + {file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7c97726520f784239f6c62506bc70e48d01ae71e9da128259d61ca5e9788516"}, + {file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:815ac2d0f3398a14286dc2cea223a6f338109f9ecf39a71160cd1628786bc6f5"}, + {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:80b5ee39b7f0131ebec7968baa9b2309eddb35b8403d1869e08f024efd883566"}, + {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5b2ccb7548a0b65974860a78c9ffe1173cfb5877460e5a229238d985565574ae"}, + {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:995ea5c48c4ebfd898eacb098164b3cc826ba273b3049e4a889658548e321b43"}, + {file = 
"coverage-7.4.0-cp310-cp310-win32.whl", hash = "sha256:79287fd95585ed36e83182794a57a46aeae0b64ca53929d1176db56aacc83451"}, + {file = "coverage-7.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b14b4f8760006bfdb6e08667af7bc2d8d9bfdb648351915315ea17645347137"}, + {file = "coverage-7.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:04387a4a6ecb330c1878907ce0dc04078ea72a869263e53c72a1ba5bbdf380ca"}, + {file = "coverage-7.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea81d8f9691bb53f4fb4db603203029643caffc82bf998ab5b59ca05560f4c06"}, + {file = "coverage-7.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74775198b702868ec2d058cb92720a3c5a9177296f75bd97317c787daf711505"}, + {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76f03940f9973bfaee8cfba70ac991825611b9aac047e5c80d499a44079ec0bc"}, + {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:485e9f897cf4856a65a57c7f6ea3dc0d4e6c076c87311d4bc003f82cfe199d25"}, + {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6ae8c9d301207e6856865867d762a4b6fd379c714fcc0607a84b92ee63feff70"}, + {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bf477c355274a72435ceb140dc42de0dc1e1e0bf6e97195be30487d8eaaf1a09"}, + {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:83c2dda2666fe32332f8e87481eed056c8b4d163fe18ecc690b02802d36a4d26"}, + {file = "coverage-7.4.0-cp311-cp311-win32.whl", hash = "sha256:697d1317e5290a313ef0d369650cfee1a114abb6021fa239ca12b4849ebbd614"}, + {file = "coverage-7.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:26776ff6c711d9d835557ee453082025d871e30b3fd6c27fcef14733f67f0590"}, + {file = "coverage-7.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:13eaf476ec3e883fe3e5fe3707caeb88268a06284484a3daf8250259ef1ba143"}, + {file 
= "coverage-7.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846f52f46e212affb5bcf131c952fb4075b55aae6b61adc9856222df89cbe3e2"}, + {file = "coverage-7.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26f66da8695719ccf90e794ed567a1549bb2644a706b41e9f6eae6816b398c4a"}, + {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:164fdcc3246c69a6526a59b744b62e303039a81e42cfbbdc171c91a8cc2f9446"}, + {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:316543f71025a6565677d84bc4df2114e9b6a615aa39fb165d697dba06a54af9"}, + {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bb1de682da0b824411e00a0d4da5a784ec6496b6850fdf8c865c1d68c0e318dd"}, + {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:0e8d06778e8fbffccfe96331a3946237f87b1e1d359d7fbe8b06b96c95a5407a"}, + {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a56de34db7b7ff77056a37aedded01b2b98b508227d2d0979d373a9b5d353daa"}, + {file = "coverage-7.4.0-cp312-cp312-win32.whl", hash = "sha256:51456e6fa099a8d9d91497202d9563a320513fcf59f33991b0661a4a6f2ad450"}, + {file = "coverage-7.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:cd3c1e4cb2ff0083758f09be0f77402e1bdf704adb7f89108007300a6da587d0"}, + {file = "coverage-7.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e9d1bf53c4c8de58d22e0e956a79a5b37f754ed1ffdbf1a260d9dcfa2d8a325e"}, + {file = "coverage-7.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:109f5985182b6b81fe33323ab4707011875198c41964f014579cf82cebf2bb85"}, + {file = "coverage-7.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cc9d4bc55de8003663ec94c2f215d12d42ceea128da8f0f4036235a119c88ac"}, + {file = 
"coverage-7.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc6d65b21c219ec2072c1293c505cf36e4e913a3f936d80028993dd73c7906b1"}, + {file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a10a4920def78bbfff4eff8a05c51be03e42f1c3735be42d851f199144897ba"}, + {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b8e99f06160602bc64da35158bb76c73522a4010f0649be44a4e167ff8555952"}, + {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7d360587e64d006402b7116623cebf9d48893329ef035278969fa3bbf75b697e"}, + {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:29f3abe810930311c0b5d1a7140f6395369c3db1be68345638c33eec07535105"}, + {file = "coverage-7.4.0-cp38-cp38-win32.whl", hash = "sha256:5040148f4ec43644702e7b16ca864c5314ccb8ee0751ef617d49aa0e2d6bf4f2"}, + {file = "coverage-7.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:9864463c1c2f9cb3b5db2cf1ff475eed2f0b4285c2aaf4d357b69959941aa555"}, + {file = "coverage-7.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:936d38794044b26c99d3dd004d8af0035ac535b92090f7f2bb5aa9c8e2f5cd42"}, + {file = "coverage-7.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:799c8f873794a08cdf216aa5d0531c6a3747793b70c53f70e98259720a6fe2d7"}, + {file = "coverage-7.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7defbb9737274023e2d7af02cac77043c86ce88a907c58f42b580a97d5bcca9"}, + {file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1526d265743fb49363974b7aa8d5899ff64ee07df47dd8d3e37dcc0818f09ed"}, + {file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf635a52fc1ea401baf88843ae8708591aa4adff875e5c23220de43b1ccf575c"}, + {file = 
"coverage-7.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:756ded44f47f330666843b5781be126ab57bb57c22adbb07d83f6b519783b870"}, + {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0eb3c2f32dabe3a4aaf6441dde94f35687224dfd7eb2a7f47f3fd9428e421058"}, + {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bfd5db349d15c08311702611f3dccbef4b4e2ec148fcc636cf8739519b4a5c0f"}, + {file = "coverage-7.4.0-cp39-cp39-win32.whl", hash = "sha256:53d7d9158ee03956e0eadac38dfa1ec8068431ef8058fe6447043db1fb40d932"}, + {file = "coverage-7.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfd2a8b6b0d8e66e944d47cdec2f47c48fef2ba2f2dff5a9a75757f64172857e"}, + {file = "coverage-7.4.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:c530833afc4707fe48524a44844493f36d8727f04dcce91fb978c414a8556cc6"}, + {file = "coverage-7.4.0.tar.gz", hash = "sha256:707c0f58cb1712b8809ece32b68996ee1e609f71bd14615bd8f87a1293cb610e"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli"] + +[[package]] +name = "cssselect" +version = "1.2.0" +description = "cssselect parses CSS3 Selectors and translates them to XPath 1.0" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cssselect-1.2.0-py2.py3-none-any.whl", hash = "sha256:da1885f0c10b60c03ed5eccbb6b68d6eff248d91976fcde348f395d54c9fd35e"}, + {file = "cssselect-1.2.0.tar.gz", hash = "sha256:666b19839cfaddb9ce9d36bfe4c969132c647b92fc9088c4e23f786b30f1b3dc"}, +] + +[[package]] +name = "curl-cffi" +version = "0.6.2" +description = "libcurl ffi bindings for Python, with impersonation support" +optional = false +python-versions = ">=3.8" +files = [ + {file = "curl_cffi-0.6.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:23b8a2872b160718c04b06b1f8aa4fb1a2f4f94bce7040493515e081a27cad19"}, + {file = "curl_cffi-0.6.2-cp38-abi3-macosx_11_0_arm64.whl", hash = 
"sha256:ad3c1cf5360810825ec4bc3da425f26ee4098878a615dab9d309a99afd883ba9"}, + {file = "curl_cffi-0.6.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d01de6ed737ad1924aaa0198195b9020c38e77ce90ea3d72b9eacf4938c7adf"}, + {file = "curl_cffi-0.6.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37e513cc149d024a2d625e202f2cc9d4423d2937343ea2e06f797d99779e62dc"}, + {file = "curl_cffi-0.6.2-cp38-abi3-win32.whl", hash = "sha256:12e829af97cbf7c1d5afef177e786f6f404ddf163b08897a1ed087cadbeb4837"}, + {file = "curl_cffi-0.6.2-cp38-abi3-win_amd64.whl", hash = "sha256:3791b7a9ae4cb1298165300f2dc2d60a86779f055570ae83163fc2d8a74bf714"}, + {file = "curl_cffi-0.6.2.tar.gz", hash = "sha256:9ee519e960b5fc6e0bbf13d0ecba9ce5f6306cb929354504bf03cc30f59a8f63"}, +] + +[package.dependencies] +certifi = "*" +cffi = ">=1.12.0" + +[package.extras] +build = ["cibuildwheel", "wheel"] +dev = ["autoflake (==1.4)", "coverage (==6.4.1)", "cryptography (==38.0.3)", "flake8 (==6.0.0)", "flake8-bugbear (==22.7.1)", "flake8-pie (==0.15.0)", "httpx (==0.23.1)", "mypy (==0.971)", "nest-asyncio (==1.6.0)", "pytest (==7.1.2)", "pytest-asyncio (==0.19.0)", "pytest-trio (==0.7.0)", "ruff (==0.1.14)", "trio (==0.21.0)", "trio-typing (==0.7.0)", "trustme (==0.9.0)", "types-certifi (==2021.10.8.2)", "uvicorn (==0.18.3)", "websockets (==11.0.3)"] +test = ["cryptography (==38.0.3)", "fastapi (==0.100.0)", "httpx (==0.23.1)", "nest-asyncio (==1.6.0)", "proxy.py (==2.4.3)", "pytest (==7.1.2)", "pytest-asyncio (==0.19.0)", "pytest-trio (==0.7.0)", "python-multipart (==0.0.6)", "trio (==0.21.0)", "trio-typing (==0.7.0)", "trustme (==0.9.0)", "types-certifi (==2021.10.8.2)", "uvicorn (==0.18.3)", "websockets (==11.0.3)"] + +[[package]] +name = "cycler" +version = "0.12.1" +description = "Composable style cycles" +optional = true +python-versions = ">=3.8" +files = [ + {file = "cycler-0.12.1-py3-none-any.whl", hash = 
"sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, + {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, +] + +[package.extras] +docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] +tests = ["pytest", "pytest-cov", "pytest-xdist"] + +[[package]] +name = "cymem" +version = "2.0.8" +description = "Manage calls to calloc/free through Cython" +optional = false +python-versions = "*" +files = [ + {file = "cymem-2.0.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:77b5d3a73c41a394efd5913ab7e48512054cd2dabb9582d489535456641c7666"}, + {file = "cymem-2.0.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bd33da892fb560ba85ea14b1528c381ff474048e861accc3366c8b491035a378"}, + {file = "cymem-2.0.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29a551eda23eebd6d076b855f77a5ed14a1d1cae5946f7b3cb5de502e21b39b0"}, + {file = "cymem-2.0.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8260445652ae5ab19fff6851f32969a7b774f309162e83367dd0f69aac5dbf7"}, + {file = "cymem-2.0.8-cp310-cp310-win_amd64.whl", hash = "sha256:a63a2bef4c7e0aec7c9908bca0a503bf91ac7ec18d41dd50dc7dff5d994e4387"}, + {file = "cymem-2.0.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6b84b780d52cb2db53d4494fe0083c4c5ee1f7b5380ceaea5b824569009ee5bd"}, + {file = "cymem-2.0.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d5f83dc3cb5a39f0e32653cceb7c8ce0183d82f1162ca418356f4a8ed9e203e"}, + {file = "cymem-2.0.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ac218cf8a43a761dc6b2f14ae8d183aca2bbb85b60fe316fd6613693b2a7914"}, + {file = "cymem-2.0.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42c993589d1811ec665d37437d5677b8757f53afadd927bf8516ac8ce2d3a50c"}, + {file = "cymem-2.0.8-cp311-cp311-win_amd64.whl", hash = "sha256:ab3cf20e0eabee9b6025ceb0245dadd534a96710d43fb7a91a35e0b9e672ee44"}, + 
{file = "cymem-2.0.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cb51fddf1b920abb1f2742d1d385469bc7b4b8083e1cfa60255e19bc0900ccb5"}, + {file = "cymem-2.0.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9235957f8c6bc2574a6a506a1687164ad629d0b4451ded89d49ebfc61b52660c"}, + {file = "cymem-2.0.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2cc38930ff5409f8d61f69a01e39ecb185c175785a1c9bec13bcd3ac8a614ba"}, + {file = "cymem-2.0.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bf49e3ea2c441f7b7848d5c61b50803e8cbd49541a70bb41ad22fce76d87603"}, + {file = "cymem-2.0.8-cp312-cp312-win_amd64.whl", hash = "sha256:ecd12e3bacf3eed5486e4cd8ede3c12da66ee0e0a9d0ae046962bc2bb503acef"}, + {file = "cymem-2.0.8-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:167d8019db3b40308aabf8183fd3fbbc256323b645e0cbf2035301058c439cd0"}, + {file = "cymem-2.0.8-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17cd2c2791c8f6b52f269a756ba7463f75bf7265785388a2592623b84bb02bf8"}, + {file = "cymem-2.0.8-cp36-cp36m-win_amd64.whl", hash = "sha256:6204f0a3307bf45d109bf698ba37997ce765f21e359284328e4306c7500fcde8"}, + {file = "cymem-2.0.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b9c05db55ea338648f8e5f51dd596568c7f62c5ae32bf3fa5b1460117910ebae"}, + {file = "cymem-2.0.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ce641f7ba0489bd1b42a4335a36f38c8507daffc29a512681afaba94a0257d2"}, + {file = "cymem-2.0.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6b83a5972a64f62796118da79dfeed71f4e1e770b2b7455e889c909504c2358"}, + {file = "cymem-2.0.8-cp37-cp37m-win_amd64.whl", hash = "sha256:ada6eb022e4a0f4f11e6356a5d804ceaa917174e6cf33c0b3e371dbea4dd2601"}, + {file = "cymem-2.0.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1e593cd57e2e19eb50c7ddaf7e230b73c890227834425b9dadcd4a86834ef2ab"}, + {file = 
"cymem-2.0.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d513f0d5c6d76facdc605e42aa42c8d50bb7dedca3144ec2b47526381764deb0"}, + {file = "cymem-2.0.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e370dd54359101b125bfb191aca0542718077b4edb90ccccba1a28116640fed"}, + {file = "cymem-2.0.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84f8c58cde71b8fc7024883031a4eec66c0a9a4d36b7850c3065493652695156"}, + {file = "cymem-2.0.8-cp38-cp38-win_amd64.whl", hash = "sha256:6a6edddb30dd000a27987fcbc6f3c23b7fe1d74f539656952cb086288c0e4e29"}, + {file = "cymem-2.0.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b896c83c08dadafe8102a521f83b7369a9c5cc3e7768eca35875764f56703f4c"}, + {file = "cymem-2.0.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a4f8f2bfee34f6f38b206997727d29976666c89843c071a968add7d61a1e8024"}, + {file = "cymem-2.0.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7372e2820fa66fd47d3b135f3eb574ab015f90780c3a21cfd4809b54f23a4723"}, + {file = "cymem-2.0.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4e57bee56d35b90fc2cba93e75b2ce76feaca05251936e28a96cf812a1f5dda"}, + {file = "cymem-2.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:ceeab3ce2a92c7f3b2d90854efb32cb203e78cb24c836a5a9a2cac221930303b"}, + {file = "cymem-2.0.8.tar.gz", hash = "sha256:8fb09d222e21dcf1c7e907dc85cf74501d4cea6c4ed4ac6c9e016f98fb59cbbf"}, +] + +[[package]] +name = "decorator" +version = "5.1.1" +description = "Decorators for Humans" +optional = true +python-versions = ">=3.5" +files = [ + {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, +] + +[[package]] +name = "demjson3" +version = "3.0.6" +description = "encoder, decoder, and lint/validator for JSON (JavaScript Object Notation) 
compliant with RFC 7159" +optional = false +python-versions = "*" +files = [ + {file = "demjson3-3.0.6.tar.gz", hash = "sha256:37c83b0c6eb08d25defc88df0a2a4875d58a7809a9650bd6eee7afd8053cdbac"}, +] + +[[package]] +name = "deprecated" +version = "1.2.14" +description = "Python @deprecated decorator to deprecate old python classes, functions or methods." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, + {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, +] + +[package.dependencies] +wrapt = ">=1.10,<2" + +[package.extras] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] + +[[package]] +name = "distlib" +version = "0.3.8" +description = "Distribution utilities" +optional = false +python-versions = "*" +files = [ + {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, + {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, +] + +[[package]] +name = "distro" +version = "1.9.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +files = [ + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, +] + +[[package]] +name = "dnspython" +version = "2.4.2" +description = "DNS toolkit" +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "dnspython-2.4.2-py3-none-any.whl", hash = "sha256:57c6fbaaeaaf39c891292012060beb141791735dbb4004798328fc2c467402d8"}, + {file = "dnspython-2.4.2.tar.gz", hash = 
"sha256:8dcfae8c7460a2f84b4072e26f1c9f4101ca20c071649cb7c34e8b6a93d58984"}, +] + +[package.extras] +dnssec = ["cryptography (>=2.6,<42.0)"] +doh = ["h2 (>=4.1.0)", "httpcore (>=0.17.3)", "httpx (>=0.24.1)"] +doq = ["aioquic (>=0.9.20)"] +idna = ["idna (>=2.1,<4.0)"] +trio = ["trio (>=0.14,<0.23)"] +wmi = ["wmi (>=1.5.1,<2.0.0)"] + +[[package]] +name = "docker" +version = "7.0.0" +description = "A Python library for the Docker Engine API." +optional = false +python-versions = ">=3.8" +files = [ + {file = "docker-7.0.0-py3-none-any.whl", hash = "sha256:12ba681f2777a0ad28ffbcc846a69c31b4dfd9752b47eb425a274ee269c5e14b"}, + {file = "docker-7.0.0.tar.gz", hash = "sha256:323736fb92cd9418fc5e7133bc953e11a9da04f4483f828b527db553f1e7e5a3"}, +] + +[package.dependencies] +packaging = ">=14.0" +pywin32 = {version = ">=304", markers = "sys_platform == \"win32\""} +requests = ">=2.26.0" +urllib3 = ">=1.26.0" + +[package.extras] +ssh = ["paramiko (>=2.4.3)"] +websockets = ["websocket-client (>=1.3.0)"] + +[[package]] +name = "duckduckgo-search" +version = "5.3.0" +description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine." +optional = false +python-versions = ">=3.8" +files = [ + {file = "duckduckgo_search-5.3.0-py3-none-any.whl", hash = "sha256:c6a6ddc3cdefc6bb7736c49fa9bdbd0a7a6bdf7ace50260cf06f8300341c9441"}, + {file = "duckduckgo_search-5.3.0.tar.gz", hash = "sha256:da6328f977295077d1095625474060b688980ae14bc196cee05f13a74f801e9a"}, +] + +[package.dependencies] +click = ">=8.1.7" +curl-cffi = ">=0.6.2" +orjson = ">=3.10.0" + +[package.extras] +dev = ["mypy (>=1.9.0)", "pytest (>=8.1.1)", "ruff (>=0.3.4)"] +lxml = ["lxml (>=5.1.1)"] + +[[package]] +name = "en-core-web-sm" +version = "3.5.0" +description = "English pipeline optimized for CPU. Components: tok2vec, tagger, parser, senter, ner, attribute_ruler, lemmatizer." 
+optional = false +python-versions = "*" +files = [ + {file = "en_core_web_sm-3.5.0-py3-none-any.whl", hash = "sha256:0964370218b7e1672a30ac50d72cdc6b16f7c867496f1d60925691188f4d2510"}, +] + +[package.dependencies] +spacy = ">=3.5.0,<3.6.0" + +[package.source] +type = "url" +url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl" + +[[package]] +name = "exceptiongroup" +version = "1.2.0" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, + {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "execnet" +version = "2.0.2" +description = "execnet: rapid multi-Python deployment" +optional = false +python-versions = ">=3.7" +files = [ + {file = "execnet-2.0.2-py3-none-any.whl", hash = "sha256:88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41"}, + {file = "execnet-2.0.2.tar.gz", hash = "sha256:cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af"}, +] + +[package.extras] +testing = ["hatch", "pre-commit", "pytest", "tox"] + +[[package]] +name = "executing" +version = "2.0.1" +description = "Get the currently executing AST node of a frame, and other information" +optional = true +python-versions = ">=3.5" +files = [ + {file = "executing-2.0.1-py2.py3-none-any.whl", hash = "sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc"}, + {file = "executing-2.0.1.tar.gz", hash = "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147"}, +] + +[package.extras] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] + +[[package]] +name = "fastapi" +version = 
"0.109.2" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fastapi-0.109.2-py3-none-any.whl", hash = "sha256:2c9bab24667293b501cad8dd388c05240c850b58ec5876ee3283c47d6e1e3a4d"}, + {file = "fastapi-0.109.2.tar.gz", hash = "sha256:f3817eac96fe4f65a2ebb4baa000f394e55f5fccdaf7f75250804bc58f354f73"}, +] + +[package.dependencies] +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" +starlette = ">=0.36.3,<0.37.0" +typing-extensions = ">=4.8.0" + +[package.extras] +all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] + +[[package]] +name = "filelock" +version = "3.13.1" +description = "A platform independent file lock." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"}, + {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +typing = ["typing-extensions (>=4.8)"] + +[[package]] +name = "flake8" +version = "7.0.0" +description = "the modular source code checker: pep8 pyflakes and co" +optional = false +python-versions = ">=3.8.1" +files = [ + {file = "flake8-7.0.0-py2.py3-none-any.whl", hash = "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3"}, + {file = "flake8-7.0.0.tar.gz", hash = "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132"}, +] + +[package.dependencies] +mccabe = ">=0.7.0,<0.8.0" +pycodestyle = ">=2.11.0,<2.12.0" +pyflakes = ">=3.2.0,<3.3.0" + +[[package]] +name = "flatbuffers" +version = "23.5.26" +description = "The FlatBuffers serialization format for Python" +optional = false +python-versions = "*" +files = [ + {file = "flatbuffers-23.5.26-py2.py3-none-any.whl", hash = "sha256:c0ff356da363087b915fde4b8b45bdda73432fc17cddb3c8157472eab1422ad1"}, + {file = "flatbuffers-23.5.26.tar.gz", hash = "sha256:9ea1144cac05ce5d86e2859f431c6cd5e66cd9c78c558317c7955fb8d4c78d89"}, +] + +[[package]] +name = "fonttools" +version = "4.47.2" +description = "Tools to manipulate font files" +optional = true +python-versions = ">=3.8" +files = [ + {file = "fonttools-4.47.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3b629108351d25512d4ea1a8393a2dba325b7b7d7308116b605ea3f8e1be88df"}, + {file = "fonttools-4.47.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:c19044256c44fe299d9a73456aabee4b4d06c6b930287be93b533b4737d70aa1"}, + {file = "fonttools-4.47.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8be28c036b9f186e8c7eaf8a11b42373e7e4949f9e9f370202b9da4c4c3f56c"}, + {file = "fonttools-4.47.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f83a4daef6d2a202acb9bf572958f91cfde5b10c8ee7fb1d09a4c81e5d851fd8"}, + {file = "fonttools-4.47.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4a5a5318ba5365d992666ac4fe35365f93004109d18858a3e18ae46f67907670"}, + {file = "fonttools-4.47.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8f57ecd742545362a0f7186774b2d1c53423ed9ece67689c93a1055b236f638c"}, + {file = "fonttools-4.47.2-cp310-cp310-win32.whl", hash = "sha256:a1c154bb85dc9a4cf145250c88d112d88eb414bad81d4cb524d06258dea1bdc0"}, + {file = "fonttools-4.47.2-cp310-cp310-win_amd64.whl", hash = "sha256:3e2b95dce2ead58fb12524d0ca7d63a63459dd489e7e5838c3cd53557f8933e1"}, + {file = "fonttools-4.47.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:29495d6d109cdbabe73cfb6f419ce67080c3ef9ea1e08d5750240fd4b0c4763b"}, + {file = "fonttools-4.47.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0a1d313a415eaaba2b35d6cd33536560deeebd2ed758b9bfb89ab5d97dc5deac"}, + {file = "fonttools-4.47.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90f898cdd67f52f18049250a6474185ef6544c91f27a7bee70d87d77a8daf89c"}, + {file = "fonttools-4.47.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3480eeb52770ff75140fe7d9a2ec33fb67b07efea0ab5129c7e0c6a639c40c70"}, + {file = "fonttools-4.47.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0255dbc128fee75fb9be364806b940ed450dd6838672a150d501ee86523ac61e"}, + {file = "fonttools-4.47.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f791446ff297fd5f1e2247c188de53c1bfb9dd7f0549eba55b73a3c2087a2703"}, + {file = "fonttools-4.47.2-cp311-cp311-win32.whl", 
hash = "sha256:740947906590a878a4bde7dd748e85fefa4d470a268b964748403b3ab2aeed6c"}, + {file = "fonttools-4.47.2-cp311-cp311-win_amd64.whl", hash = "sha256:63fbed184979f09a65aa9c88b395ca539c94287ba3a364517698462e13e457c9"}, + {file = "fonttools-4.47.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4ec558c543609e71b2275c4894e93493f65d2f41c15fe1d089080c1d0bb4d635"}, + {file = "fonttools-4.47.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e040f905d542362e07e72e03612a6270c33d38281fd573160e1003e43718d68d"}, + {file = "fonttools-4.47.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6dd58cc03016b281bd2c74c84cdaa6bd3ce54c5a7f47478b7657b930ac3ed8eb"}, + {file = "fonttools-4.47.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32ab2e9702dff0dd4510c7bb958f265a8d3dd5c0e2547e7b5f7a3df4979abb07"}, + {file = "fonttools-4.47.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a808f3c1d1df1f5bf39be869b6e0c263570cdafb5bdb2df66087733f566ea71"}, + {file = "fonttools-4.47.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ac71e2e201df041a2891067dc36256755b1229ae167edbdc419b16da78732c2f"}, + {file = "fonttools-4.47.2-cp312-cp312-win32.whl", hash = "sha256:69731e8bea0578b3c28fdb43dbf95b9386e2d49a399e9a4ad736b8e479b08085"}, + {file = "fonttools-4.47.2-cp312-cp312-win_amd64.whl", hash = "sha256:b3e1304e5f19ca861d86a72218ecce68f391646d85c851742d265787f55457a4"}, + {file = "fonttools-4.47.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:254d9a6f7be00212bf0c3159e0a420eb19c63793b2c05e049eb337f3023c5ecc"}, + {file = "fonttools-4.47.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eabae77a07c41ae0b35184894202305c3ad211a93b2eb53837c2a1143c8bc952"}, + {file = "fonttools-4.47.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86a5ab2873ed2575d0fcdf1828143cfc6b977ac448e3dc616bb1e3d20efbafa"}, + {file = 
"fonttools-4.47.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13819db8445a0cec8c3ff5f243af6418ab19175072a9a92f6cc8ca7d1452754b"}, + {file = "fonttools-4.47.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4e743935139aa485fe3253fc33fe467eab6ea42583fa681223ea3f1a93dd01e6"}, + {file = "fonttools-4.47.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d49ce3ea7b7173faebc5664872243b40cf88814ca3eb135c4a3cdff66af71946"}, + {file = "fonttools-4.47.2-cp38-cp38-win32.whl", hash = "sha256:94208ea750e3f96e267f394d5588579bb64cc628e321dbb1d4243ffbc291b18b"}, + {file = "fonttools-4.47.2-cp38-cp38-win_amd64.whl", hash = "sha256:0f750037e02beb8b3569fbff701a572e62a685d2a0e840d75816592280e5feae"}, + {file = "fonttools-4.47.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3d71606c9321f6701642bd4746f99b6089e53d7e9817fc6b964e90d9c5f0ecc6"}, + {file = "fonttools-4.47.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:86e0427864c6c91cf77f16d1fb9bf1bbf7453e824589e8fb8461b6ee1144f506"}, + {file = "fonttools-4.47.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a00bd0e68e88987dcc047ea31c26d40a3c61185153b03457956a87e39d43c37"}, + {file = "fonttools-4.47.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5d77479fb885ef38a16a253a2f4096bc3d14e63a56d6246bfdb56365a12b20c"}, + {file = "fonttools-4.47.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5465df494f20a7d01712b072ae3ee9ad2887004701b95cb2cc6dcb9c2c97a899"}, + {file = "fonttools-4.47.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4c811d3c73b6abac275babb8aa439206288f56fdb2c6f8835e3d7b70de8937a7"}, + {file = "fonttools-4.47.2-cp39-cp39-win32.whl", hash = "sha256:5b60e3afa9635e3dfd3ace2757039593e3bd3cf128be0ddb7a1ff4ac45fa5a50"}, + {file = "fonttools-4.47.2-cp39-cp39-win_amd64.whl", hash = "sha256:7ee48bd9d6b7e8f66866c9090807e3a4a56cf43ffad48962725a190e0dd774c8"}, + {file = "fonttools-4.47.2-py3-none-any.whl", hash = 
"sha256:7eb7ad665258fba68fd22228a09f347469d95a97fb88198e133595947a20a184"}, + {file = "fonttools-4.47.2.tar.gz", hash = "sha256:7df26dd3650e98ca45f1e29883c96a0b9f5bb6af8d632a6a108bc744fa0bd9b3"}, +] + +[package.extras] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] +graphite = ["lz4 (>=1.7.4.2)"] +interpolatable = ["munkres", "pycairo", "scipy"] +lxml = ["lxml (>=4.0,<5)"] +pathops = ["skia-pathops (>=0.5.0)"] +plot = ["matplotlib"] +repacker = ["uharfbuzz (>=0.23.0)"] +symfont = ["sympy"] +type1 = ["xattr"] +ufo = ["fs (>=2.2.0,<3)"] +unicode = ["unicodedata2 (>=15.1.0)"] +woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] + +[[package]] +name = "frozenlist" +version = "1.4.1" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +files = [ + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, + {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, + {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, + {file = 
"frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, + {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, + {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, + {file = 
"frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, + {file = 
"frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, + {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, + {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, + {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, + {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, + {file = 
"frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, + {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, + {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, + {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, + {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, +] + +[[package]] +name = "fsspec" +version = "2023.12.2" +description = "File-system specification" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fsspec-2023.12.2-py3-none-any.whl", hash = "sha256:d800d87f72189a745fa3d6b033b9dc4a34ad069f60ca60b943a63599f5501960"}, + {file = "fsspec-2023.12.2.tar.gz", hash = "sha256:8548d39e8810b59c38014934f6b31e57f40c1b20f911f4cc2b85389c7e9bf0cb"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +devel = 
["pytest", "pytest-cov"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +tqdm = ["tqdm"] + +[[package]] +name = "ftfy" +version = "6.1.3" +description = "Fixes mojibake and other problems with Unicode, after the fact" +optional = false +python-versions = ">=3.8,<4" +files = [ + {file = "ftfy-6.1.3-py3-none-any.whl", hash = "sha256:e49c306c06a97f4986faa7a8740cfe3c13f3106e85bcec73eb629817e671557c"}, + {file = "ftfy-6.1.3.tar.gz", hash = "sha256:693274aead811cff24c1e8784165aa755cd2f6e442a5ec535c7d697f6422a422"}, +] + +[package.dependencies] +wcwidth = ">=0.2.12,<0.3.0" + +[[package]] +name = "gitdb" +version = "4.0.11" +description = "Git Object Database" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, + {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.41" +description = "GitPython is a Python library used to interact with Git repositories" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.41-py3-none-any.whl", hash = "sha256:c36b6634d069b3f719610175020a9aed919421c87552185b085e04fbbdb10b7c"}, + {file = "GitPython-3.1.41.tar.gz", hash = 
"sha256:ed66e624884f76df22c8e16066d567aaa5a37d5b5fa19db2c6df6f7156db9048"}, +] + +[package.dependencies] +gitdb = ">=4.0.1,<5" + +[package.extras] +test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "sumtypes"] + +[[package]] +name = "google-api-core" +version = "2.15.0" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-api-core-2.15.0.tar.gz", hash = "sha256:abc978a72658f14a2df1e5e12532effe40f94f868f6e23d95133bd6abcca35ca"}, + {file = "google_api_core-2.15.0-py3-none-any.whl", hash = "sha256:2aa56d2be495551e66bbff7f729b790546f87d5c90e74781aa77233bcb395a8a"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +grpcio = [ + {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, +] +grpcio-status = [ + {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, +] +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] + +[[package]] +name = "google-api-python-client" +version = "2.114.0" +description = "Google API Client Library for Python" +optional = false 
+python-versions = ">=3.7" +files = [ + {file = "google-api-python-client-2.114.0.tar.gz", hash = "sha256:e041bbbf60e682261281e9d64b4660035f04db1cccba19d1d68eebc24d1465ed"}, + {file = "google_api_python_client-2.114.0-py2.py3-none-any.whl", hash = "sha256:690e0bb67d70ff6dea4e8a5d3738639c105a478ac35da153d3b2a384064e9e1a"}, +] + +[package.dependencies] +google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0.dev0" +google-auth = ">=1.19.0,<3.0.0.dev0" +google-auth-httplib2 = ">=0.1.0" +httplib2 = ">=0.15.0,<1.dev0" +uritemplate = ">=3.0.1,<5" + +[[package]] +name = "google-auth" +version = "2.26.2" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-auth-2.26.2.tar.gz", hash = "sha256:97327dbbf58cccb58fc5a1712bba403ae76668e64814eb30f7316f7e27126b81"}, + {file = "google_auth-2.26.2-py2.py3-none-any.whl", hash = "sha256:3f445c8ce9b61ed6459aad86d8ccdba4a9afed841b2d1451a11ef4db08957424"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "google-auth-httplib2" +version = "0.2.0" +description = "Google Authentication Library: httplib2 transport" +optional = false +python-versions = "*" +files = [ + {file = "google-auth-httplib2-0.2.0.tar.gz", hash = "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05"}, + {file = "google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d"}, +] + +[package.dependencies] +google-auth = "*" +httplib2 = ">=0.19.0" + +[[package]] +name = "google-cloud-appengine-logging" +version = "1.4.0" 
+description = "Google Cloud Appengine Logging API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-appengine-logging-1.4.0.tar.gz", hash = "sha256:fe74f418d0b01ebebe83ae212abf051ad42692a636677e397de3d459e00d7b64"}, + {file = "google_cloud_appengine_logging-1.4.0-py2.py3-none-any.whl", hash = "sha256:226721903a2d50b6e51c43e59edb548c0bb08cc5f70e1a5f289d3edf2f09a8c9"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.0,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" + +[[package]] +name = "google-cloud-audit-log" +version = "0.2.5" +description = "Google Cloud Audit Protos" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-audit-log-0.2.5.tar.gz", hash = "sha256:86e2faba3383adc8fd04a5bd7fd4f960b3e4aedaa7ed950f2f891ce16902eb6b"}, + {file = "google_cloud_audit_log-0.2.5-py2.py3-none-any.whl", hash = "sha256:18b94d4579002a450b7902cd2e8b8fdcb1ea2dd4df3b41f8f82be6d9f7fcd746"}, +] + +[package.dependencies] +googleapis-common-protos = ">=1.56.2,<2.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" + +[[package]] +name = "google-cloud-core" +version = "2.4.1" +description = "Google Cloud API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-core-2.4.1.tar.gz", hash = "sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073"}, + {file = "google_cloud_core-2.4.1-py2.py3-none-any.whl", hash = "sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61"}, +] + +[package.dependencies] +google-api-core = ">=1.31.6,<2.0.dev0 || >2.3.0,<3.0.0dev" +google-auth = 
">=1.25.0,<3.0dev" + +[package.extras] +grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"] + +[[package]] +name = "google-cloud-logging" +version = "3.9.0" +description = "Stackdriver Logging API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-logging-3.9.0.tar.gz", hash = "sha256:4decb1b0bed4a0e3c0e58a376646e6002d6be7cad039e3466822e8665072ea33"}, + {file = "google_cloud_logging-3.9.0-py2.py3-none-any.whl", hash = "sha256:094a2db068ff7f38c9e0c1017673fa49c0768fbae02769e03e06baa30f138b87"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.33.2,<2.0.dev0 || >=2.8.dev0,<3.0.0dev", extras = ["grpc"]} +google-cloud-appengine-logging = ">=0.1.0,<2.0.0dev" +google-cloud-audit-log = ">=0.1.0,<1.0.0dev" +google-cloud-core = ">=2.0.0,<3.0.0dev" +grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev" +proto-plus = [ + {version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\""}, + {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, +] +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" + +[[package]] +name = "google-cloud-storage" +version = "2.14.0" +description = "Google Cloud Storage API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-storage-2.14.0.tar.gz", hash = "sha256:2d23fcf59b55e7b45336729c148bb1c464468c69d5efbaee30f7201dd90eb97e"}, + {file = "google_cloud_storage-2.14.0-py2.py3-none-any.whl", hash = "sha256:8641243bbf2a2042c16a6399551fbb13f062cbc9a2de38d6c0bb5426962e9dbd"}, +] + +[package.dependencies] +google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0dev" +google-auth = ">=2.23.3,<3.0dev" +google-cloud-core = ">=2.3.0,<3.0dev" +google-crc32c = ">=1.0,<2.0dev" +google-resumable-media = ">=2.6.0" +requests = ">=2.18.0,<3.0.0dev" + +[package.extras] +protobuf = ["protobuf 
(<5.0.0dev)"] + +[[package]] +name = "google-crc32c" +version = "1.5.0" +description = "A python wrapper of the C library 'Google CRC32C'" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-crc32c-1.5.0.tar.gz", hash = "sha256:89284716bc6a5a415d4eaa11b1726d2d60a0cd12aadf5439828353662ede9dd7"}, + {file = "google_crc32c-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:596d1f98fc70232fcb6590c439f43b350cb762fb5d61ce7b0e9db4539654cc13"}, + {file = "google_crc32c-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:be82c3c8cfb15b30f36768797a640e800513793d6ae1724aaaafe5bf86f8f346"}, + {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:461665ff58895f508e2866824a47bdee72497b091c730071f2b7575d5762ab65"}, + {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2096eddb4e7c7bdae4bd69ad364e55e07b8316653234a56552d9c988bd2d61b"}, + {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:116a7c3c616dd14a3de8c64a965828b197e5f2d121fedd2f8c5585c547e87b02"}, + {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5829b792bf5822fd0a6f6eb34c5f81dd074f01d570ed7f36aa101d6fc7a0a6e4"}, + {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:64e52e2b3970bd891309c113b54cf0e4384762c934d5ae56e283f9a0afcd953e"}, + {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:02ebb8bf46c13e36998aeaad1de9b48f4caf545e91d14041270d9dca767b780c"}, + {file = "google_crc32c-1.5.0-cp310-cp310-win32.whl", hash = "sha256:2e920d506ec85eb4ba50cd4228c2bec05642894d4c73c59b3a2fe20346bd00ee"}, + {file = "google_crc32c-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:07eb3c611ce363c51a933bf6bd7f8e3878a51d124acfc89452a75120bc436289"}, + {file = "google_crc32c-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:cae0274952c079886567f3f4f685bcaf5708f0a23a5f5216fdab71f81a6c0273"}, + {file = "google_crc32c-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1034d91442ead5a95b5aaef90dbfaca8633b0247d1e41621d1e9f9db88c36298"}, + {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c42c70cd1d362284289c6273adda4c6af8039a8ae12dc451dcd61cdabb8ab57"}, + {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8485b340a6a9e76c62a7dce3c98e5f102c9219f4cfbf896a00cf48caf078d438"}, + {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77e2fd3057c9d78e225fa0a2160f96b64a824de17840351b26825b0848022906"}, + {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183"}, + {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:a1fd716e7a01f8e717490fbe2e431d2905ab8aa598b9b12f8d10abebb36b04dd"}, + {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:72218785ce41b9cfd2fc1d6a017dc1ff7acfc4c17d01053265c41a2c0cc39b8c"}, + {file = "google_crc32c-1.5.0-cp311-cp311-win32.whl", hash = "sha256:66741ef4ee08ea0b2cc3c86916ab66b6aef03768525627fd6a1b34968b4e3709"}, + {file = "google_crc32c-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba1eb1843304b1e5537e1fca632fa894d6f6deca8d6389636ee5b4797affb968"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:98cb4d057f285bd80d8778ebc4fde6b4d509ac3f331758fb1528b733215443ae"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19e0a019d2c4dcc5e598cd4a4bc7b008546b0358bd322537c74ad47a5386884f"}, + {file = 
"google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02c65b9817512edc6a4ae7c7e987fea799d2e0ee40c53ec573a692bee24de876"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6ac08d24c1f16bd2bf5eca8eaf8304812f44af5cfe5062006ec676e7e1d50afc"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3359fc442a743e870f4588fcf5dcbc1bf929df1fad8fb9905cd94e5edb02e84c"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e986b206dae4476f41bcec1faa057851f3889503a70e1bdb2378d406223994a"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:de06adc872bcd8c2a4e0dc51250e9e65ef2ca91be023b9d13ebd67c2ba552e1e"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-win32.whl", hash = "sha256:d3515f198eaa2f0ed49f8819d5732d70698c3fa37384146079b3799b97667a94"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:67b741654b851abafb7bc625b6d1cdd520a379074e64b6a128e3b688c3c04740"}, + {file = "google_crc32c-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c02ec1c5856179f171e032a31d6f8bf84e5a75c45c33b2e20a3de353b266ebd8"}, + {file = "google_crc32c-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:edfedb64740750e1a3b16152620220f51d58ff1b4abceb339ca92e934775c27a"}, + {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84e6e8cd997930fc66d5bb4fde61e2b62ba19d62b7abd7a69920406f9ecca946"}, + {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a"}, + {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:998679bf62b7fb599d2878aa3ed06b9ce688b8974893e7223c60db155f26bd8d"}, + {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = 
"sha256:83c681c526a3439b5cf94f7420471705bbf96262f49a6fe546a6db5f687a3d4a"}, + {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4c6fdd4fccbec90cc8a01fc00773fcd5fa28db683c116ee3cb35cd5da9ef6c37"}, + {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5ae44e10a8e3407dbe138984f21e536583f2bba1be9491239f942c2464ac0894"}, + {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:37933ec6e693e51a5b07505bd05de57eee12f3e8c32b07da7e73669398e6630a"}, + {file = "google_crc32c-1.5.0-cp38-cp38-win32.whl", hash = "sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4"}, + {file = "google_crc32c-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:74dea7751d98034887dbd821b7aae3e1d36eda111d6ca36c206c44478035709c"}, + {file = "google_crc32c-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c6c777a480337ac14f38564ac88ae82d4cd238bf293f0a22295b66eb89ffced7"}, + {file = "google_crc32c-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:759ce4851a4bb15ecabae28f4d2e18983c244eddd767f560165563bf9aefbc8d"}, + {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f13cae8cc389a440def0c8c52057f37359014ccbc9dc1f0827936bcd367c6100"}, + {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e560628513ed34759456a416bf86b54b2476c59144a9138165c9a1575801d0d9"}, + {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1674e4307fa3024fc897ca774e9c7562c957af85df55efe2988ed9056dc4e57"}, + {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:278d2ed7c16cfc075c91378c4f47924c0625f5fc84b2d50d921b18b7975bd210"}, + {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d5280312b9af0976231f9e317c20e4a61cd2f9629b7bfea6a693d1878a264ebd"}, + {file = 
"google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8b87e1a59c38f275c0e3676fc2ab6d59eccecfd460be267ac360cc31f7bcde96"}, + {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7c074fece789b5034b9b1404a1f8208fc2d4c6ce9decdd16e8220c5a793e6f61"}, + {file = "google_crc32c-1.5.0-cp39-cp39-win32.whl", hash = "sha256:7f57f14606cd1dd0f0de396e1e53824c371e9544a822648cd76c034d209b559c"}, + {file = "google_crc32c-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:a2355cba1f4ad8b6988a4ca3feed5bff33f6af2d7f134852cf279c2aebfde541"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f314013e7dcd5cf45ab1945d92e713eec788166262ae8deb2cfacd53def27325"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b747a674c20a67343cb61d43fdd9207ce5da6a99f629c6e2541aa0e89215bcd"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f24ed114432de109aa9fd317278518a5af2d31ac2ea6b952b2f7782b43da091"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8667b48e7a7ef66afba2c81e1094ef526388d35b873966d8a9a447974ed9178"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:1c7abdac90433b09bad6c43a43af253e688c9cfc1c86d332aed13f9a7c7f65e2"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6f998db4e71b645350b9ac28a2167e6632c239963ca9da411523bb439c5c514d"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c99616c853bb585301df6de07ca2cadad344fd1ada6d62bb30aec05219c45d2"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ad40e31093a4af319dadf503b2467ccdc8f67c72e4bcba97f8c10cb078207b5"}, + {file = 
"google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd67cf24a553339d5062eff51013780a00d6f97a39ca062781d06b3a73b15462"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:398af5e3ba9cf768787eef45c803ff9614cc3e22a5b2f7d7ae116df8b11e3314"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b1f8133c9a275df5613a451e73f36c2aea4fe13c5c8997e22cf355ebd7bd0728"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ba053c5f50430a3fcfd36f75aff9caeba0440b2d076afdb79a318d6ca245f88"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:272d3892a1e1a2dbc39cc5cde96834c236d5327e2122d3aaa19f6614531bb6eb"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:635f5d4dd18758a1fbd1049a8e8d2fee4ffed124462d837d1a02a0e009c3ab31"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c672d99a345849301784604bfeaeba4db0c7aae50b95be04dd651fd2a7310b93"}, +] + +[package.extras] +testing = ["pytest"] + +[[package]] +name = "google-resumable-media" +version = "2.7.0" +description = "Utilities for Google Media Downloads and Resumable Uploads" +optional = false +python-versions = ">= 3.7" +files = [ + {file = "google-resumable-media-2.7.0.tar.gz", hash = "sha256:5f18f5fa9836f4b083162064a1c2c98c17239bfda9ca50ad970ccf905f3e625b"}, + {file = "google_resumable_media-2.7.0-py2.py3-none-any.whl", hash = "sha256:79543cfe433b63fd81c0844b7803aba1bb8950b47bedf7d980c38fa123937e08"}, +] + +[package.dependencies] +google-crc32c = ">=1.0,<2.0dev" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)", "google-auth (>=1.22.0,<2.0dev)"] +requests = ["requests (>=2.18.0,<3.0.0dev)"] + +[[package]] +name = "googleapis-common-protos" +version = "1.62.0" +description = "Common protobufs used in 
Google APIs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "googleapis-common-protos-1.62.0.tar.gz", hash = "sha256:83f0ece9f94e5672cced82f592d2a5edf527a96ed1794f0bab36d5735c996277"}, + {file = "googleapis_common_protos-1.62.0-py2.py3-none-any.whl", hash = "sha256:4750113612205514f9f6aa4cb00d523a94f3e8c06c5ad2fee466387dc4875f07"}, +] + +[package.dependencies] +grpcio = {version = ">=1.44.0,<2.0.0.dev0", optional = true, markers = "extra == \"grpc\""} +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] + +[[package]] +name = "greenlet" +version = "3.0.3" +description = "Lightweight in-process concurrent programming" +optional = false +python-versions = ">=3.7" +files = [ + {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, + {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, + {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, + {file = "greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, + {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, + {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, + {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, + {file = "greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, + {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, + {file = 
"greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, + {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, + {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, + {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, + {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, + {file = 
"greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, + {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, + {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, + {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, + {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, + {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, + {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, + {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, + {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, + {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, + {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, + {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, + {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, + {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, + {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = 
"sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, + {file = "greenlet-3.0.3.tar.gz", hash = "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, +] + +[package.extras] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil"] + +[[package]] +name = "grpc-google-iam-v1" +version = "0.13.0" +description = "IAM API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "grpc-google-iam-v1-0.13.0.tar.gz", hash = "sha256:fad318608b9e093258fbf12529180f400d1c44453698a33509cc6ecf005b294e"}, + {file = "grpc_google_iam_v1-0.13.0-py2.py3-none-any.whl", hash = "sha256:53902e2af7de8df8c1bd91373d9be55b0743ec267a7428ea638db3775becae89"}, +] + +[package.dependencies] +googleapis-common-protos = {version = ">=1.56.0,<2.0.0dev", extras = ["grpc"]} +grpcio = ">=1.44.0,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" + +[[package]] +name = "grpcio" +version = "1.60.0" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.7" +files = [ + {file = "grpcio-1.60.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:d020cfa595d1f8f5c6b343530cd3ca16ae5aefdd1e832b777f9f0eb105f5b139"}, + {file = "grpcio-1.60.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b98f43fcdb16172dec5f4b49f2fece4b16a99fd284d81c6bbac1b3b69fcbe0ff"}, + {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:20e7a4f7ded59097c84059d28230907cd97130fa74f4a8bfd1d8e5ba18c81491"}, + {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452ca5b4afed30e7274445dd9b441a35ece656ec1600b77fff8c216fdf07df43"}, + {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43e636dc2ce9ece583b3e2ca41df5c983f4302eabc6d5f9cd04f0562ee8ec1ae"}, + {file = "grpcio-1.60.0-cp310-cp310-musllinux_1_1_i686.whl", 
hash = "sha256:6e306b97966369b889985a562ede9d99180def39ad42c8014628dd3cc343f508"}, + {file = "grpcio-1.60.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f897c3b127532e6befdcf961c415c97f320d45614daf84deba0a54e64ea2457b"}, + {file = "grpcio-1.60.0-cp310-cp310-win32.whl", hash = "sha256:b87efe4a380887425bb15f220079aa8336276398dc33fce38c64d278164f963d"}, + {file = "grpcio-1.60.0-cp310-cp310-win_amd64.whl", hash = "sha256:a9c7b71211f066908e518a2ef7a5e211670761651039f0d6a80d8d40054047df"}, + {file = "grpcio-1.60.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:fb464479934778d7cc5baf463d959d361954d6533ad34c3a4f1d267e86ee25fd"}, + {file = "grpcio-1.60.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:4b44d7e39964e808b071714666a812049765b26b3ea48c4434a3b317bac82f14"}, + {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:90bdd76b3f04bdb21de5398b8a7c629676c81dfac290f5f19883857e9371d28c"}, + {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91229d7203f1ef0ab420c9b53fe2ca5c1fbeb34f69b3bc1b5089466237a4a134"}, + {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b36a2c6d4920ba88fa98075fdd58ff94ebeb8acc1215ae07d01a418af4c0253"}, + {file = "grpcio-1.60.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:297eef542156d6b15174a1231c2493ea9ea54af8d016b8ca7d5d9cc65cfcc444"}, + {file = "grpcio-1.60.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:87c9224acba0ad8bacddf427a1c2772e17ce50b3042a789547af27099c5f751d"}, + {file = "grpcio-1.60.0-cp311-cp311-win32.whl", hash = "sha256:95ae3e8e2c1b9bf671817f86f155c5da7d49a2289c5cf27a319458c3e025c320"}, + {file = "grpcio-1.60.0-cp311-cp311-win_amd64.whl", hash = "sha256:467a7d31554892eed2aa6c2d47ded1079fc40ea0b9601d9f79204afa8902274b"}, + {file = "grpcio-1.60.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:a7152fa6e597c20cb97923407cf0934e14224af42c2b8d915f48bc3ad2d9ac18"}, + {file = 
"grpcio-1.60.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:7db16dd4ea1b05ada504f08d0dca1cd9b926bed3770f50e715d087c6f00ad748"}, + {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:b0571a5aef36ba9177e262dc88a9240c866d903a62799e44fd4aae3f9a2ec17e"}, + {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fd9584bf1bccdfff1512719316efa77be235469e1e3295dce64538c4773840b"}, + {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6a478581b1a1a8fdf3318ecb5f4d0cda41cacdffe2b527c23707c9c1b8fdb55"}, + {file = "grpcio-1.60.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:77c8a317f0fd5a0a2be8ed5cbe5341537d5c00bb79b3bb27ba7c5378ba77dbca"}, + {file = "grpcio-1.60.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1c30bb23a41df95109db130a6cc1b974844300ae2e5d68dd4947aacba5985aa5"}, + {file = "grpcio-1.60.0-cp312-cp312-win32.whl", hash = "sha256:2aef56e85901c2397bd557c5ba514f84de1f0ae5dd132f5d5fed042858115951"}, + {file = "grpcio-1.60.0-cp312-cp312-win_amd64.whl", hash = "sha256:e381fe0c2aa6c03b056ad8f52f8efca7be29fb4d9ae2f8873520843b6039612a"}, + {file = "grpcio-1.60.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:92f88ca1b956eb8427a11bb8b4a0c0b2b03377235fc5102cb05e533b8693a415"}, + {file = "grpcio-1.60.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:e278eafb406f7e1b1b637c2cf51d3ad45883bb5bd1ca56bc05e4fc135dfdaa65"}, + {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:a48edde788b99214613e440fce495bbe2b1e142a7f214cce9e0832146c41e324"}, + {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de2ad69c9a094bf37c1102b5744c9aec6cf74d2b635558b779085d0263166454"}, + {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:073f959c6f570797272f4ee9464a9997eaf1e98c27cb680225b82b53390d61e6"}, + {file = 
"grpcio-1.60.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c826f93050c73e7769806f92e601e0efdb83ec8d7c76ddf45d514fee54e8e619"}, + {file = "grpcio-1.60.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9e30be89a75ee66aec7f9e60086fadb37ff8c0ba49a022887c28c134341f7179"}, + {file = "grpcio-1.60.0-cp37-cp37m-win_amd64.whl", hash = "sha256:b0fb2d4801546598ac5cd18e3ec79c1a9af8b8f2a86283c55a5337c5aeca4b1b"}, + {file = "grpcio-1.60.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:9073513ec380434eb8d21970e1ab3161041de121f4018bbed3146839451a6d8e"}, + {file = "grpcio-1.60.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:74d7d9fa97809c5b892449b28a65ec2bfa458a4735ddad46074f9f7d9550ad13"}, + {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:1434ca77d6fed4ea312901122dc8da6c4389738bf5788f43efb19a838ac03ead"}, + {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e61e76020e0c332a98290323ecfec721c9544f5b739fab925b6e8cbe1944cf19"}, + {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675997222f2e2f22928fbba640824aebd43791116034f62006e19730715166c0"}, + {file = "grpcio-1.60.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5208a57eae445ae84a219dfd8b56e04313445d146873117b5fa75f3245bc1390"}, + {file = "grpcio-1.60.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:428d699c8553c27e98f4d29fdc0f0edc50e9a8a7590bfd294d2edb0da7be3629"}, + {file = "grpcio-1.60.0-cp38-cp38-win32.whl", hash = "sha256:83f2292ae292ed5a47cdcb9821039ca8e88902923198f2193f13959360c01860"}, + {file = "grpcio-1.60.0-cp38-cp38-win_amd64.whl", hash = "sha256:705a68a973c4c76db5d369ed573fec3367d7d196673fa86614b33d8c8e9ebb08"}, + {file = "grpcio-1.60.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:c193109ca4070cdcaa6eff00fdb5a56233dc7610216d58fb81638f89f02e4968"}, + {file = "grpcio-1.60.0-cp39-cp39-macosx_10_10_universal2.whl", hash = 
"sha256:676e4a44e740deaba0f4d95ba1d8c5c89a2fcc43d02c39f69450b1fa19d39590"}, + {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:5ff21e000ff2f658430bde5288cb1ac440ff15c0d7d18b5fb222f941b46cb0d2"}, + {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c86343cf9ff7b2514dd229bdd88ebba760bd8973dac192ae687ff75e39ebfab"}, + {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fd3b3968ffe7643144580f260f04d39d869fcc2cddb745deef078b09fd2b328"}, + {file = "grpcio-1.60.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:30943b9530fe3620e3b195c03130396cd0ee3a0d10a66c1bee715d1819001eaf"}, + {file = "grpcio-1.60.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b10241250cb77657ab315270b064a6c7f1add58af94befa20687e7c8d8603ae6"}, + {file = "grpcio-1.60.0-cp39-cp39-win32.whl", hash = "sha256:79a050889eb8d57a93ed21d9585bb63fca881666fc709f5d9f7f9372f5e7fd03"}, + {file = "grpcio-1.60.0-cp39-cp39-win_amd64.whl", hash = "sha256:8a97a681e82bc11a42d4372fe57898d270a2707f36c45c6676e49ce0d5c41353"}, + {file = "grpcio-1.60.0.tar.gz", hash = "sha256:2199165a1affb666aa24adf0c97436686d0a61bc5fc113c037701fb7c7fceb96"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.60.0)"] + +[[package]] +name = "grpcio-status" +version = "1.60.0" +description = "Status proto mapping for gRPC" +optional = false +python-versions = ">=3.6" +files = [ + {file = "grpcio-status-1.60.0.tar.gz", hash = "sha256:f10e0b6db3adc0fdc244b71962814ee982996ef06186446b5695b9fa635aa1ab"}, + {file = "grpcio_status-1.60.0-py3-none-any.whl", hash = "sha256:7d383fa36e59c1e61d380d91350badd4d12ac56e4de2c2b831b050362c3c572e"}, +] + +[package.dependencies] +googleapis-common-protos = ">=1.5.5" +grpcio = ">=1.60.0" +protobuf = ">=4.21.6" + +[[package]] +name = "gtts" +version = "2.5.0" +description = "gTTS (Google Text-to-Speech), a Python library and CLI tool to interface with Google Translate text-to-speech API" 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "gTTS-2.5.0-py3-none-any.whl", hash = "sha256:181eca837c8c9ae280160573d76e8a340c1c54055c966ad11b4f4bf1b8e3194a"}, + {file = "gTTS-2.5.0.tar.gz", hash = "sha256:d6d589b1759948490fbabd7a11d1799eeafdde8ba90580194918a18dc078ec49"}, +] + +[package.dependencies] +click = ">=7.1,<8.2" +requests = ">=2.27,<3" + +[package.extras] +docs = ["sphinx", "sphinx-autobuild", "sphinx-click", "sphinx-mdinclude", "sphinx-rtd-theme"] +tests = ["pytest (>=7.1.3,<7.2.0)", "pytest-cov", "testfixtures"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "h2" +version = "4.1.0" +description = "HTTP/2 State-Machine based protocol implementation" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "h2-4.1.0-py3-none-any.whl", hash = "sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d"}, + {file = "h2-4.1.0.tar.gz", hash = "sha256:a83aca08fbe7aacb79fec788c9c0bac936343560ed9ec18b82a13a12c28d2abb"}, +] + +[package.dependencies] +hpack = ">=4.0,<5" +hyperframe = ">=6.0,<7" + +[[package]] +name = "hpack" +version = "4.0.0" +description = "Pure-Python HPACK header compression" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "hpack-4.0.0-py3-none-any.whl", hash = "sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c"}, + {file = "hpack-4.0.0.tar.gz", hash = "sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095"}, +] + +[[package]] +name = "httpcore" +version = "0.17.3" +description = "A minimal low-level HTTP client." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "httpcore-0.17.3-py3-none-any.whl", hash = "sha256:c2789b767ddddfa2a5782e3199b2b7f6894540b17b16ec26b2c4d8e103510b87"}, + {file = "httpcore-0.17.3.tar.gz", hash = "sha256:a6f30213335e34c1ade7be6ec7c47f19f50c56db36abef1a9dfa3815b1cb3888"}, +] + +[package.dependencies] +anyio = ">=3.0,<5.0" +certifi = "*" +h11 = ">=0.13,<0.15" +sniffio = "==1.*" + +[package.extras] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + +[[package]] +name = "httplib2" +version = "0.22.0" +description = "A comprehensive HTTP client library." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc"}, + {file = "httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"}, +] + +[package.dependencies] +pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""} + +[[package]] +name = "httptools" +version = "0.6.1" +description = "A collection of framework independent HTTP protocol utils." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d2f6c3c4cb1948d912538217838f6e9960bc4a521d7f9b323b3da579cd14532f"}, + {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:00d5d4b68a717765b1fabfd9ca755bd12bf44105eeb806c03d1962acd9b8e563"}, + {file = "httptools-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:639dc4f381a870c9ec860ce5c45921db50205a37cc3334e756269736ff0aac58"}, + {file = "httptools-0.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e57997ac7fb7ee43140cc03664de5f268813a481dff6245e0075925adc6aa185"}, + {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0ac5a0ae3d9f4fe004318d64b8a854edd85ab76cffbf7ef5e32920faef62f142"}, + {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3f30d3ce413088a98b9db71c60a6ada2001a08945cb42dd65a9a9fe228627658"}, + {file = "httptools-0.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:1ed99a373e327f0107cb513b61820102ee4f3675656a37a50083eda05dc9541b"}, + {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7a7ea483c1a4485c71cb5f38be9db078f8b0e8b4c4dc0210f531cdd2ddac1ef1"}, + {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:85ed077c995e942b6f1b07583e4eb0a8d324d418954fc6af913d36db7c05a5a0"}, + {file = "httptools-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b0bb634338334385351a1600a73e558ce619af390c2b38386206ac6a27fecfc"}, + {file = "httptools-0.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d9ceb2c957320def533671fc9c715a80c47025139c8d1f3797477decbc6edd2"}, + {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:4f0f8271c0a4db459f9dc807acd0eadd4839934a4b9b892f6f160e94da309837"}, + {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6a4f5ccead6d18ec072ac0b84420e95d27c1cdf5c9f1bc8fbd8daf86bd94f43d"}, + {file = "httptools-0.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:5cceac09f164bcba55c0500a18fe3c47df29b62353198e4f37bbcc5d591172c3"}, + {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:75c8022dca7935cba14741a42744eee13ba05db00b27a4b940f0d646bd4d56d0"}, + {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:48ed8129cd9a0d62cf4d1575fcf90fb37e3ff7d5654d3a5814eb3d55f36478c2"}, + {file = "httptools-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f58e335a1402fb5a650e271e8c2d03cfa7cea46ae124649346d17bd30d59c90"}, + {file = "httptools-0.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93ad80d7176aa5788902f207a4e79885f0576134695dfb0fefc15b7a4648d503"}, + {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9bb68d3a085c2174c2477eb3ffe84ae9fb4fde8792edb7bcd09a1d8467e30a84"}, + {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b512aa728bc02354e5ac086ce76c3ce635b62f5fbc32ab7082b5e582d27867bb"}, + {file = "httptools-0.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:97662ce7fb196c785344d00d638fc9ad69e18ee4bfb4000b35a52efe5adcc949"}, + {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8e216a038d2d52ea13fdd9b9c9c7459fb80d78302b257828285eca1c773b99b3"}, + {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3e802e0b2378ade99cd666b5bffb8b2a7cc8f3d28988685dc300469ea8dd86cb"}, + {file = "httptools-0.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4bd3e488b447046e386a30f07af05f9b38d3d368d1f7b4d8f7e10af85393db97"}, + {file = 
"httptools-0.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe467eb086d80217b7584e61313ebadc8d187a4d95bb62031b7bab4b205c3ba3"}, + {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3c3b214ce057c54675b00108ac42bacf2ab8f85c58e3f324a4e963bbc46424f4"}, + {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8ae5b97f690badd2ca27cbf668494ee1b6d34cf1c464271ef7bfa9ca6b83ffaf"}, + {file = "httptools-0.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:405784577ba6540fa7d6ff49e37daf104e04f4b4ff2d1ac0469eaa6a20fde084"}, + {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:95fb92dd3649f9cb139e9c56604cc2d7c7bf0fc2e7c8d7fbd58f96e35eddd2a3"}, + {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dcbab042cc3ef272adc11220517278519adf8f53fd3056d0e68f0a6f891ba94e"}, + {file = "httptools-0.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cf2372e98406efb42e93bfe10f2948e467edfd792b015f1b4ecd897903d3e8d"}, + {file = "httptools-0.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:678fcbae74477a17d103b7cae78b74800d795d702083867ce160fc202104d0da"}, + {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e0b281cf5a125c35f7f6722b65d8542d2e57331be573e9e88bc8b0115c4a7a81"}, + {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:95658c342529bba4e1d3d2b1a874db16c7cca435e8827422154c9da76ac4e13a"}, + {file = "httptools-0.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ebaec1bf683e4bf5e9fbb49b8cc36da482033596a415b3e4ebab5a4c0d7ec5e"}, + {file = "httptools-0.6.1.tar.gz", hash = "sha256:c6e26c30455600b95d94b1b836085138e82f177351454ee841c148f93a9bad5a"}, +] + +[package.extras] +test = ["Cython (>=0.29.24,<0.30.0)"] + +[[package]] +name = "httpx" +version = "0.24.1" +description = "The next 
generation HTTP client." +optional = false +python-versions = ">=3.7" +files = [ + {file = "httpx-0.24.1-py3-none-any.whl", hash = "sha256:06781eb9ac53cde990577af654bd990a4949de37a28bdb4a230d434f3a30b9bd"}, + {file = "httpx-0.24.1.tar.gz", hash = "sha256:5853a43053df830c20f8110c5e69fe44d035d850b2dfe795e196f00fdb774bdd"}, +] + +[package.dependencies] +certifi = "*" +httpcore = ">=0.15.0,<0.18.0" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + +[[package]] +name = "huggingface-hub" +version = "0.20.2" +description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "huggingface_hub-0.20.2-py3-none-any.whl", hash = "sha256:53752eda2239d30a470c307a61cf9adcf136bc77b0a734338c7d04941af560d8"}, + {file = "huggingface_hub-0.20.2.tar.gz", hash = "sha256:215c5fceff631030c7a3d19ba7b588921c908b3f21eef31d160ebc245b200ff6"}, +] + +[package.dependencies] +filelock = "*" +fsspec = ">=2023.5.0" +packaging = ">=20.9" +pyyaml = ">=5.1" +requests = "*" +tqdm = ">=4.42.1" +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +cli = ["InquirerPy (==0.3.4)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", 
"pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] +inference = ["aiohttp", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)"] +quality = ["mypy (==1.5.1)", "ruff (>=0.1.3)"] +tensorflow = ["graphviz", "pydot", "tensorflow"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["torch"] +typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] + +[[package]] +name = "humanfriendly" +version = "10.0" +description = "Human friendly output for text interfaces using Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, + {file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"}, +] + +[package.dependencies] +pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_version >= \"3.8\""} + +[[package]] +name = "hypercorn" +version = "0.14.4" +description = "A ASGI Server based on Hyper libraries and inspired by Gunicorn" +optional = false +python-versions = ">=3.7" +files = [ + {file = "hypercorn-0.14.4-py3-none-any.whl", hash = "sha256:f956200dbf8677684e6e976219ffa6691d6cf795281184b41dbb0b135ab37b8d"}, + {file = "hypercorn-0.14.4.tar.gz", hash = "sha256:3fa504efc46a271640023c9b88c3184fd64993f47a282e8ae1a13ccb285c2f67"}, +] + +[package.dependencies] +h11 
= "*" +h2 = ">=3.1.0" +priority = "*" +tomli = {version = "*", markers = "python_version < \"3.11\""} +wsproto = ">=0.14.0" + +[package.extras] +docs = ["pydata_sphinx_theme"] +h3 = ["aioquic (>=0.9.0,<1.0)"] +trio = ["exceptiongroup (>=1.1.0)", "trio (>=0.22.0)"] +uvloop = ["uvloop"] + +[[package]] +name = "hyperframe" +version = "6.0.1" +description = "HTTP/2 framing layer for Python" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "hyperframe-6.0.1-py3-none-any.whl", hash = "sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15"}, + {file = "hyperframe-6.0.1.tar.gz", hash = "sha256:ae510046231dc8e9ecb1a6586f63d2347bf4c8905914aa84ba585ae85f28a914"}, +] + +[[package]] +name = "identify" +version = "2.5.33" +description = "File identification library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "identify-2.5.33-py2.py3-none-any.whl", hash = "sha256:d40ce5fcd762817627670da8a7d8d8e65f24342d14539c59488dc603bf662e34"}, + {file = "identify-2.5.33.tar.gz", hash = "sha256:161558f9fe4559e1557e1bff323e8631f6a0e4837f7497767c1782832f16b62d"}, +] + +[package.extras] +license = ["ukkonen"] + +[[package]] +name = "idna" +version = "3.6" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, + {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, +] + +[[package]] +name = "importlib-metadata" +version = "6.11.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_metadata-6.11.0-py3-none-any.whl", hash = "sha256:f0afba6205ad8f8947c7d338b5342d5db2afbfd82f9cbef7879a9539cc12eb9b"}, + {file = "importlib_metadata-6.11.0.tar.gz", hash = 
"sha256:1231cf92d825c9e03cfc4da076a16de6422c863558229ea0b22b675657463443"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] + +[[package]] +name = "importlib-resources" +version = "6.1.1" +description = "Read resources from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_resources-6.1.1-py3-none-any.whl", hash = "sha256:e8bf90d8213b486f428c9c39714b920041cb02c184686a3dee24905aaa8105d6"}, + {file = "importlib_resources-6.1.1.tar.gz", hash = "sha256:3893a00122eafde6894c59914446a512f728a0c1a45f9bb9b63721b6bacf0b4a"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff", "zipp (>=3.17)"] + +[[package]] +name = "inflection" +version = "0.5.1" +description = "A port of Ruby on Rails inflector to Python" +optional = false +python-versions = ">=3.5" +files = [ + {file = "inflection-0.5.1-py2.py3-none-any.whl", hash = "sha256:f38b2b640938a4f35ade69ac3d053042959b62a0f1076a5bbaa1b9526605a8a2"}, + {file = "inflection-0.5.1.tar.gz", hash = "sha256:1a29730d366e996aaacffb2f1f1cb9593dc38e2ddd30c91250c6dde09ea9b417"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash 
= "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "ipython" +version = "8.20.0" +description = "IPython: Productive Interactive Computing" +optional = true +python-versions = ">=3.10" +files = [ + {file = "ipython-8.20.0-py3-none-any.whl", hash = "sha256:bc9716aad6f29f36c449e30821c9dd0c1c1a7b59ddcc26931685b87b4c569619"}, + {file = "ipython-8.20.0.tar.gz", hash = "sha256:2f21bd3fc1d51550c89ee3944ae04bbc7bc79e129ea0937da6e6c68bfdbf117a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +decorator = "*" +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +jedi = ">=0.16" +matplotlib-inline = "*" +pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} +prompt-toolkit = ">=3.0.41,<3.1.0" +pygments = ">=2.4.0" +stack-data = "*" +traitlets = ">=5" + +[package.extras] +all = ["black", "curio", "docrepr", "exceptiongroup", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.23)", "pandas", "pickleshare", "pytest", "pytest-asyncio (<0.22)", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"] +black = ["black"] +doc = ["docrepr", "exceptiongroup", "ipykernel", "matplotlib", "pickleshare", "pytest", "pytest-asyncio (<0.22)", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"] +kernel = ["ipykernel"] +nbconvert = ["nbconvert"] +nbformat = ["nbformat"] +notebook = ["ipywidgets", "notebook"] +parallel = ["ipyparallel"] +qtconsole = ["qtconsole"] +test = ["pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath"] +test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.23)", "pandas", "pickleshare", 
"pytest", "pytest-asyncio (<0.22)", "testpath", "trio"] + +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "jedi" +version = "0.19.1" +description = "An autocompletion tool for Python that can be used for text editors." +optional = true +python-versions = ">=3.6" +files = [ + {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, + {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"}, +] + +[package.dependencies] +parso = ">=0.8.3,<0.9.0" + +[package.extras] +docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] + +[[package]] +name = "jinja2" +version = "3.1.3" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, + {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = "jsonpickle" +version = "3.0.2" +description = "Python library for serializing any arbitrary object graph into JSON" +optional = true +python-versions = ">=3.7" +files = [ + {file = "jsonpickle-3.0.2-py3-none-any.whl", hash = "sha256:4a8442d97ca3f77978afa58068768dba7bff2dbabe79a9647bc3cdafd4ef019f"}, + {file = "jsonpickle-3.0.2.tar.gz", hash = "sha256:e37abba4bfb3ca4a4647d28bb9f4706436f7b46c8a8333b4a718abafa8e46b37"}, +] + +[package.extras] +docs = ["jaraco.packaging (>=3.2)", "rst.linker (>=1.9)", "sphinx"] +testing = ["ecdsa", "feedparser", "gmpy2", "numpy", "pandas", "pymongo", "pytest (>=3.5,!=3.7.3)", "pytest-black-multipy", "pytest-checkdocs (>=1.2.3)", "pytest-cov", "pytest-flake8 (>=1.1.1)", "scikit-learn", "sqlalchemy"] +testing-libs = ["simplejson", "ujson"] + +[[package]] +name = "jsonschema" +version = "4.21.0" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.21.0-py3-none-any.whl", hash = "sha256:70a09719d375c0a2874571b363c8a24be7df8071b80c9aa76bc4551e7297c63c"}, + {file = "jsonschema-4.21.0.tar.gz", hash = 
"sha256:3ba18e27f7491ea4a1b22edce00fb820eec968d397feb3f9cb61d5894bb38167"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.12.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, + {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, +] + +[package.dependencies] +referencing = ">=0.31.0" + +[[package]] +name = "kiwisolver" +version = "1.4.5" +description = "A fast implementation of the Cassowary constraint solver" +optional = true +python-versions = ">=3.7" +files = [ + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-win32.whl", hash = "sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238"}, + {file = "kiwisolver-1.4.5-cp310-cp310-win_amd64.whl", hash = "sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win32.whl", hash = "sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355"}, + 
{file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3"}, + {file = 
"kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win32.whl", hash = "sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win32.whl", hash = "sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win_amd64.whl", hash = "sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win32.whl", hash = "sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win_amd64.whl", hash = "sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win32.whl", hash = "sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win_amd64.whl", hash = "sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee"}, + {file = "kiwisolver-1.4.5.tar.gz", hash = "sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec"}, +] + +[[package]] +name = "kubernetes" +version = "29.0.0" +description = "Kubernetes python client" +optional = false +python-versions = ">=3.6" +files = [ + {file = "kubernetes-29.0.0-py2.py3-none-any.whl", hash = 
"sha256:ab8cb0e0576ccdfb71886366efb102c6a20f268d817be065ce7f9909c631e43e"}, + {file = "kubernetes-29.0.0.tar.gz", hash = "sha256:c4812e227ae74d07d53c88293e564e54b850452715a59a927e7e1bc6b9a60459"}, +] + +[package.dependencies] +certifi = ">=14.05.14" +google-auth = ">=1.0.1" +oauthlib = ">=3.2.2" +python-dateutil = ">=2.5.3" +pyyaml = ">=5.4.1" +requests = "*" +requests-oauthlib = "*" +six = ">=1.9.0" +urllib3 = ">=1.24.2" +websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.dev0 || >=0.43.dev0" + +[package.extras] +adal = ["adal (>=1.0.2)"] + +[[package]] +name = "langcodes" +version = "3.3.0" +description = "Tools for labeling human languages with IETF language tags" +optional = false +python-versions = ">=3.6" +files = [ + {file = "langcodes-3.3.0-py3-none-any.whl", hash = "sha256:4d89fc9acb6e9c8fdef70bcdf376113a3db09b67285d9e1d534de6d8818e7e69"}, + {file = "langcodes-3.3.0.tar.gz", hash = "sha256:794d07d5a28781231ac335a1561b8442f8648ca07cd518310aeb45d6f0807ef6"}, +] + +[package.extras] +data = ["language-data (>=1.1,<2.0)"] + +[[package]] +name = "litellm" +version = "1.17.13" +description = "Library to easily interface with LLM API providers" +optional = false +python-versions = ">=3.8, !=2.7.*, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*, !=3.7.*" +files = [ + {file = "litellm-1.17.13-py3-none-any.whl", hash = "sha256:d9370077a3a5e3540c3d2e84580c3fee5add9ff606d6715da0428399b73f389f"}, + {file = "litellm-1.17.13.tar.gz", hash = "sha256:df2d42bb759cff4d3390a51c11dad87cfd581e56d43103bb4eb4fa4a81d0d38e"}, +] + +[package.dependencies] +aiohttp = "*" +click = "*" +importlib-metadata = ">=6.8.0" +jinja2 = ">=3.1.2,<4.0.0" +openai = ">=1.0.0" +python-dotenv = ">=0.2.0" +requests = ">=2.31.0,<3.0.0" +tiktoken = ">=0.4.0" +tokenizers = "*" + +[package.extras] +extra-proxy = ["streamlit (>=1.29.0,<2.0.0)"] +proxy = ["backoff", "fastapi (>=0.104.1,<0.105.0)", "gunicorn (>=21.2.0,<22.0.0)", "orjson (>=3.9.7,<4.0.0)", "pyyaml (>=6.0,<7.0)", "rq", 
"uvicorn (>=0.22.0,<0.23.0)"] + +[[package]] +name = "loguru" +version = "0.7.2" +description = "Python logging made (stupidly) simple" +optional = false +python-versions = ">=3.5" +files = [ + {file = "loguru-0.7.2-py3-none-any.whl", hash = "sha256:003d71e3d3ed35f0f8984898359d65b79e5b21943f78af86aa5491210429b8eb"}, + {file = "loguru-0.7.2.tar.gz", hash = "sha256:e671a53522515f34fd406340ee968cb9ecafbc4b36c679da03c18fd8d0bd51ac"}, +] + +[package.dependencies] +colorama = {version = ">=0.3.4", markers = "sys_platform == \"win32\""} +win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""} + +[package.extras] +dev = ["Sphinx (==7.2.5)", "colorama (==0.4.5)", "colorama (==0.4.6)", "exceptiongroup (==1.1.3)", "freezegun (==1.1.0)", "freezegun (==1.2.2)", "mypy (==v0.910)", "mypy (==v0.971)", "mypy (==v1.4.1)", "mypy (==v1.5.1)", "pre-commit (==3.4.0)", "pytest (==6.1.2)", "pytest (==7.4.0)", "pytest-cov (==2.12.1)", "pytest-cov (==4.1.0)", "pytest-mypy-plugins (==1.9.3)", "pytest-mypy-plugins (==3.0.0)", "sphinx-autobuild (==2021.3.14)", "sphinx-rtd-theme (==1.3.0)", "tox (==3.27.1)", "tox (==4.11.0)"] + +[[package]] +name = "lxml" +version = "5.1.0" +description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "lxml-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:704f5572ff473a5f897745abebc6df40f22d4133c1e0a1f124e4f2bd3330ff7e"}, + {file = "lxml-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9d3c0f8567ffe7502d969c2c1b809892dc793b5d0665f602aad19895f8d508da"}, + {file = "lxml-5.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5fcfbebdb0c5d8d18b84118842f31965d59ee3e66996ac842e21f957eb76138c"}, + {file = "lxml-5.1.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f37c6d7106a9d6f0708d4e164b707037b7380fcd0b04c5bd9cae1fb46a856fb"}, + {file = "lxml-5.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2befa20a13f1a75c751f47e00929fb3433d67eb9923c2c0b364de449121f447c"}, + {file = "lxml-5.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22b7ee4c35f374e2c20337a95502057964d7e35b996b1c667b5c65c567d2252a"}, + {file = "lxml-5.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:bf8443781533b8d37b295016a4b53c1494fa9a03573c09ca5104550c138d5c05"}, + {file = "lxml-5.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:82bddf0e72cb2af3cbba7cec1d2fd11fda0de6be8f4492223d4a268713ef2147"}, + {file = "lxml-5.1.0-cp310-cp310-win32.whl", hash = "sha256:b66aa6357b265670bb574f050ffceefb98549c721cf28351b748be1ef9577d93"}, + {file = "lxml-5.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:4946e7f59b7b6a9e27bef34422f645e9a368cb2be11bf1ef3cafc39a1f6ba68d"}, + {file = "lxml-5.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:14deca1460b4b0f6b01f1ddc9557704e8b365f55c63070463f6c18619ebf964f"}, + {file = "lxml-5.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ed8c3d2cd329bf779b7ed38db176738f3f8be637bb395ce9629fc76f78afe3d4"}, + {file = "lxml-5.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:436a943c2900bb98123b06437cdd30580a61340fbdb7b28aaf345a459c19046a"}, 
+ {file = "lxml-5.1.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:acb6b2f96f60f70e7f34efe0c3ea34ca63f19ca63ce90019c6cbca6b676e81fa"}, + {file = "lxml-5.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af8920ce4a55ff41167ddbc20077f5698c2e710ad3353d32a07d3264f3a2021e"}, + {file = "lxml-5.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cfced4a069003d8913408e10ca8ed092c49a7f6cefee9bb74b6b3e860683b45"}, + {file = "lxml-5.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9e5ac3437746189a9b4121db2a7b86056ac8786b12e88838696899328fc44bb2"}, + {file = "lxml-5.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f4c9bda132ad108b387c33fabfea47866af87f4ea6ffb79418004f0521e63204"}, + {file = "lxml-5.1.0-cp311-cp311-win32.whl", hash = "sha256:bc64d1b1dab08f679fb89c368f4c05693f58a9faf744c4d390d7ed1d8223869b"}, + {file = "lxml-5.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:a5ab722ae5a873d8dcee1f5f45ddd93c34210aed44ff2dc643b5025981908cda"}, + {file = "lxml-5.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9aa543980ab1fbf1720969af1d99095a548ea42e00361e727c58a40832439114"}, + {file = "lxml-5.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6f11b77ec0979f7e4dc5ae081325a2946f1fe424148d3945f943ceaede98adb8"}, + {file = "lxml-5.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a36c506e5f8aeb40680491d39ed94670487ce6614b9d27cabe45d94cd5d63e1e"}, + {file = "lxml-5.1.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f643ffd2669ffd4b5a3e9b41c909b72b2a1d5e4915da90a77e119b8d48ce867a"}, + {file = "lxml-5.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16dd953fb719f0ffc5bc067428fc9e88f599e15723a85618c45847c96f11f431"}, + {file = "lxml-5.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:16018f7099245157564d7148165132c70adb272fb5a17c048ba70d9cc542a1a1"}, + {file = "lxml-5.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:82cd34f1081ae4ea2ede3d52f71b7be313756e99b4b5f829f89b12da552d3aa3"}, + {file = "lxml-5.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:19a1bc898ae9f06bccb7c3e1dfd73897ecbbd2c96afe9095a6026016e5ca97b8"}, + {file = "lxml-5.1.0-cp312-cp312-win32.whl", hash = "sha256:13521a321a25c641b9ea127ef478b580b5ec82aa2e9fc076c86169d161798b01"}, + {file = "lxml-5.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:1ad17c20e3666c035db502c78b86e58ff6b5991906e55bdbef94977700c72623"}, + {file = "lxml-5.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:24ef5a4631c0b6cceaf2dbca21687e29725b7c4e171f33a8f8ce23c12558ded1"}, + {file = "lxml-5.1.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8d2900b7f5318bc7ad8631d3d40190b95ef2aa8cc59473b73b294e4a55e9f30f"}, + {file = "lxml-5.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:601f4a75797d7a770daed8b42b97cd1bb1ba18bd51a9382077a6a247a12aa38d"}, + {file = "lxml-5.1.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4b68c961b5cc402cbd99cca5eb2547e46ce77260eb705f4d117fd9c3f932b95"}, + {file = "lxml-5.1.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:afd825e30f8d1f521713a5669b63657bcfe5980a916c95855060048b88e1adb7"}, + {file = "lxml-5.1.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:262bc5f512a66b527d026518507e78c2f9c2bd9eb5c8aeeb9f0eb43fcb69dc67"}, + {file = "lxml-5.1.0-cp36-cp36m-win32.whl", hash = "sha256:e856c1c7255c739434489ec9c8aa9cdf5179785d10ff20add308b5d673bed5cd"}, + {file = "lxml-5.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:c7257171bb8d4432fe9d6fdde4d55fdbe663a63636a17f7f9aaba9bcb3153ad7"}, + {file = "lxml-5.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b9e240ae0ba96477682aa87899d94ddec1cc7926f9df29b1dd57b39e797d5ab5"}, + {file = 
"lxml-5.1.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a96f02ba1bcd330807fc060ed91d1f7a20853da6dd449e5da4b09bfcc08fdcf5"}, + {file = "lxml-5.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e3898ae2b58eeafedfe99e542a17859017d72d7f6a63de0f04f99c2cb125936"}, + {file = "lxml-5.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61c5a7edbd7c695e54fca029ceb351fc45cd8860119a0f83e48be44e1c464862"}, + {file = "lxml-5.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3aeca824b38ca78d9ee2ab82bd9883083d0492d9d17df065ba3b94e88e4d7ee6"}, + {file = "lxml-5.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8f52fe6859b9db71ee609b0c0a70fea5f1e71c3462ecf144ca800d3f434f0764"}, + {file = "lxml-5.1.0-cp37-cp37m-win32.whl", hash = "sha256:d42e3a3fc18acc88b838efded0e6ec3edf3e328a58c68fbd36a7263a874906c8"}, + {file = "lxml-5.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:eac68f96539b32fce2c9b47eb7c25bb2582bdaf1bbb360d25f564ee9e04c542b"}, + {file = "lxml-5.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ae15347a88cf8af0949a9872b57a320d2605ae069bcdf047677318bc0bba45b1"}, + {file = "lxml-5.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c26aab6ea9c54d3bed716b8851c8bfc40cb249b8e9880e250d1eddde9f709bf5"}, + {file = "lxml-5.1.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:342e95bddec3a698ac24378d61996b3ee5ba9acfeb253986002ac53c9a5f6f84"}, + {file = "lxml-5.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:725e171e0b99a66ec8605ac77fa12239dbe061482ac854d25720e2294652eeaa"}, + {file = "lxml-5.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d184e0d5c918cff04cdde9dbdf9600e960161d773666958c9d7b565ccc60c45"}, + {file = "lxml-5.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:98f3f020a2b736566c707c8e034945c02aa94e124c24f77ca097c446f81b01f1"}, + {file = "lxml-5.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6d48fc57e7c1e3df57be5ae8614bab6d4e7b60f65c5457915c26892c41afc59e"}, + {file = "lxml-5.1.0-cp38-cp38-win32.whl", hash = "sha256:7ec465e6549ed97e9f1e5ed51c657c9ede767bc1c11552f7f4d022c4df4a977a"}, + {file = "lxml-5.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:b21b4031b53d25b0858d4e124f2f9131ffc1530431c6d1321805c90da78388d1"}, + {file = "lxml-5.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:52427a7eadc98f9e62cb1368a5079ae826f94f05755d2d567d93ee1bc3ceb354"}, + {file = "lxml-5.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6a2a2c724d97c1eb8cf966b16ca2915566a4904b9aad2ed9a09c748ffe14f969"}, + {file = "lxml-5.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:843b9c835580d52828d8f69ea4302537337a21e6b4f1ec711a52241ba4a824f3"}, + {file = "lxml-5.1.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9b99f564659cfa704a2dd82d0684207b1aadf7d02d33e54845f9fc78e06b7581"}, + {file = "lxml-5.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f8b0c78e7aac24979ef09b7f50da871c2de2def043d468c4b41f512d831e912"}, + {file = "lxml-5.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9bcf86dfc8ff3e992fed847c077bd875d9e0ba2fa25d859c3a0f0f76f07f0c8d"}, + {file = "lxml-5.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:49a9b4af45e8b925e1cd6f3b15bbba2c81e7dba6dce170c677c9cda547411e14"}, + {file = "lxml-5.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:280f3edf15c2a967d923bcfb1f8f15337ad36f93525828b40a0f9d6c2ad24890"}, + {file = "lxml-5.1.0-cp39-cp39-win32.whl", hash = "sha256:ed7326563024b6e91fef6b6c7a1a2ff0a71b97793ac33dbbcf38f6005e51ff6e"}, + {file = "lxml-5.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:8d7b4beebb178e9183138f552238f7e6613162a42164233e2bda00cb3afac58f"}, + {file = 
"lxml-5.1.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9bd0ae7cc2b85320abd5e0abad5ccee5564ed5f0cc90245d2f9a8ef330a8deae"}, + {file = "lxml-5.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8c1d679df4361408b628f42b26a5d62bd3e9ba7f0c0e7969f925021554755aa"}, + {file = "lxml-5.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2ad3a8ce9e8a767131061a22cd28fdffa3cd2dc193f399ff7b81777f3520e372"}, + {file = "lxml-5.1.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:304128394c9c22b6569eba2a6d98392b56fbdfbad58f83ea702530be80d0f9df"}, + {file = "lxml-5.1.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d74fcaf87132ffc0447b3c685a9f862ffb5b43e70ea6beec2fb8057d5d2a1fea"}, + {file = "lxml-5.1.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:8cf5877f7ed384dabfdcc37922c3191bf27e55b498fecece9fd5c2c7aaa34c33"}, + {file = "lxml-5.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:877efb968c3d7eb2dad540b6cabf2f1d3c0fbf4b2d309a3c141f79c7e0061324"}, + {file = "lxml-5.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f14a4fb1c1c402a22e6a341a24c1341b4a3def81b41cd354386dcb795f83897"}, + {file = "lxml-5.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:25663d6e99659544ee8fe1b89b1a8c0aaa5e34b103fab124b17fa958c4a324a6"}, + {file = "lxml-5.1.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8b9f19df998761babaa7f09e6bc169294eefafd6149aaa272081cbddc7ba4ca3"}, + {file = "lxml-5.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e53d7e6a98b64fe54775d23a7c669763451340c3d44ad5e3a3b48a1efbdc96f"}, + {file = "lxml-5.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c3cd1fc1dc7c376c54440aeaaa0dcc803d2126732ff5c6b68ccd619f2e64be4f"}, + {file = "lxml-5.1.0.tar.gz", hash = "sha256:3eea6ed6e6c918e468e693c41ef07f3c3acc310b70ddd9cc72d9ef84bc9564ca"}, +] + +[package.extras] +cssselect = ["cssselect (>=0.7)"] +html5 
= ["html5lib"] +htmlsoup = ["BeautifulSoup4"] +source = ["Cython (>=3.0.7)"] + +[[package]] +name = "markupsafe" +version = "2.1.3" +description = "Safely add untrusted strings to HTML/XML markup." +optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, + {file = 
"MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, + {file 
= "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, + {file = 
"MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, + {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, +] + +[[package]] +name = "matplotlib" +version = "3.8.2" +description = "Python plotting package" +optional = true +python-versions = ">=3.9" +files = [ + {file = "matplotlib-3.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:09796f89fb71a0c0e1e2f4bdaf63fb2cefc84446bb963ecdeb40dfee7dfa98c7"}, + {file = 
"matplotlib-3.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f9c6976748a25e8b9be51ea028df49b8e561eed7809146da7a47dbecebab367"}, + {file = "matplotlib-3.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b78e4f2cedf303869b782071b55fdde5987fda3038e9d09e58c91cc261b5ad18"}, + {file = "matplotlib-3.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e208f46cf6576a7624195aa047cb344a7f802e113bb1a06cfd4bee431de5e31"}, + {file = "matplotlib-3.8.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:46a569130ff53798ea5f50afce7406e91fdc471ca1e0e26ba976a8c734c9427a"}, + {file = "matplotlib-3.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:830f00640c965c5b7f6bc32f0d4ce0c36dfe0379f7dd65b07a00c801713ec40a"}, + {file = "matplotlib-3.8.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d86593ccf546223eb75a39b44c32788e6f6440d13cfc4750c1c15d0fcb850b63"}, + {file = "matplotlib-3.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9a5430836811b7652991939012f43d2808a2db9b64ee240387e8c43e2e5578c8"}, + {file = "matplotlib-3.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9576723858a78751d5aacd2497b8aef29ffea6d1c95981505877f7ac28215c6"}, + {file = "matplotlib-3.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ba9cbd8ac6cf422f3102622b20f8552d601bf8837e49a3afed188d560152788"}, + {file = "matplotlib-3.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:03f9d160a29e0b65c0790bb07f4f45d6a181b1ac33eb1bb0dd225986450148f0"}, + {file = "matplotlib-3.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:3773002da767f0a9323ba1a9b9b5d00d6257dbd2a93107233167cfb581f64717"}, + {file = "matplotlib-3.8.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:4c318c1e95e2f5926fba326f68177dee364aa791d6df022ceb91b8221bd0a627"}, + {file = "matplotlib-3.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:091275d18d942cf1ee9609c830a1bc36610607d8223b1b981c37d5c9fc3e46a4"}, 
+ {file = "matplotlib-3.8.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b0f3b8ea0e99e233a4bcc44590f01604840d833c280ebb8fe5554fd3e6cfe8d"}, + {file = "matplotlib-3.8.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7b1704a530395aaf73912be741c04d181f82ca78084fbd80bc737be04848331"}, + {file = "matplotlib-3.8.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:533b0e3b0c6768eef8cbe4b583731ce25a91ab54a22f830db2b031e83cca9213"}, + {file = "matplotlib-3.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:0f4fc5d72b75e2c18e55eb32292659cf731d9d5b312a6eb036506304f4675630"}, + {file = "matplotlib-3.8.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:deaed9ad4da0b1aea77fe0aa0cebb9ef611c70b3177be936a95e5d01fa05094f"}, + {file = "matplotlib-3.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:172f4d0fbac3383d39164c6caafd3255ce6fa58f08fc392513a0b1d3b89c4f89"}, + {file = "matplotlib-3.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7d36c2209d9136cd8e02fab1c0ddc185ce79bc914c45054a9f514e44c787917"}, + {file = "matplotlib-3.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5864bdd7da445e4e5e011b199bb67168cdad10b501750367c496420f2ad00843"}, + {file = "matplotlib-3.8.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ef8345b48e95cee45ff25192ed1f4857273117917a4dcd48e3905619bcd9c9b8"}, + {file = "matplotlib-3.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:7c48d9e221b637c017232e3760ed30b4e8d5dfd081daf327e829bf2a72c731b4"}, + {file = "matplotlib-3.8.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:aa11b3c6928a1e496c1a79917d51d4cd5d04f8a2e75f21df4949eeefdf697f4b"}, + {file = "matplotlib-3.8.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1095fecf99eeb7384dabad4bf44b965f929a5f6079654b681193edf7169ec20"}, + {file = "matplotlib-3.8.2-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:bddfb1db89bfaa855912261c805bd0e10218923cc262b9159a49c29a7a1c1afa"}, + {file = "matplotlib-3.8.2.tar.gz", hash = "sha256:01a978b871b881ee76017152f1f1a0cbf6bd5f7b8ff8c96df0df1bd57d8755a1"}, +] + +[package.dependencies] +contourpy = ">=1.0.1" +cycler = ">=0.10" +fonttools = ">=4.22.0" +kiwisolver = ">=1.3.1" +numpy = ">=1.21,<2" +packaging = ">=20.0" +pillow = ">=8" +pyparsing = ">=2.3.1" +python-dateutil = ">=2.7" + +[[package]] +name = "matplotlib-inline" +version = "0.1.6" +description = "Inline Matplotlib backend for Jupyter" +optional = true +python-versions = ">=3.5" +files = [ + {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, + {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"}, +] + +[package.dependencies] +traitlets = "*" + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mmh3" +version = "4.1.0" +description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions." 
+optional = false +python-versions = "*" +files = [ + {file = "mmh3-4.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:be5ac76a8b0cd8095784e51e4c1c9c318c19edcd1709a06eb14979c8d850c31a"}, + {file = "mmh3-4.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:98a49121afdfab67cd80e912b36404139d7deceb6773a83620137aaa0da5714c"}, + {file = "mmh3-4.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5259ac0535874366e7d1a5423ef746e0d36a9e3c14509ce6511614bdc5a7ef5b"}, + {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5950827ca0453a2be357696da509ab39646044e3fa15cad364eb65d78797437"}, + {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1dd0f652ae99585b9dd26de458e5f08571522f0402155809fd1dc8852a613a39"}, + {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99d25548070942fab1e4a6f04d1626d67e66d0b81ed6571ecfca511f3edf07e6"}, + {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53db8d9bad3cb66c8f35cbc894f336273f63489ce4ac416634932e3cbe79eb5b"}, + {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75da0f615eb55295a437264cc0b736753f830b09d102aa4c2a7d719bc445ec05"}, + {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b926b07fd678ea84b3a2afc1fa22ce50aeb627839c44382f3d0291e945621e1a"}, + {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c5b053334f9b0af8559d6da9dc72cef0a65b325ebb3e630c680012323c950bb6"}, + {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:5bf33dc43cd6de2cb86e0aa73a1cc6530f557854bbbe5d59f41ef6de2e353d7b"}, + {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:fa7eacd2b830727ba3dd65a365bed8a5c992ecd0c8348cf39a05cc77d22f4970"}, + {file = 
"mmh3-4.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:42dfd6742b9e3eec599f85270617debfa0bbb913c545bb980c8a4fa7b2d047da"}, + {file = "mmh3-4.1.0-cp310-cp310-win32.whl", hash = "sha256:2974ad343f0d39dcc88e93ee6afa96cedc35a9883bc067febd7ff736e207fa47"}, + {file = "mmh3-4.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:74699a8984ded645c1a24d6078351a056f5a5f1fe5838870412a68ac5e28d865"}, + {file = "mmh3-4.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:f0dc874cedc23d46fc488a987faa6ad08ffa79e44fb08e3cd4d4cf2877c00a00"}, + {file = "mmh3-4.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3280a463855b0eae64b681cd5b9ddd9464b73f81151e87bb7c91a811d25619e6"}, + {file = "mmh3-4.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:97ac57c6c3301769e757d444fa7c973ceb002cb66534b39cbab5e38de61cd896"}, + {file = "mmh3-4.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a7b6502cdb4dbd880244818ab363c8770a48cdccecf6d729ade0241b736b5ec0"}, + {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52ba2da04671a9621580ddabf72f06f0e72c1c9c3b7b608849b58b11080d8f14"}, + {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a5fef4c4ecc782e6e43fbeab09cff1bac82c998a1773d3a5ee6a3605cde343e"}, + {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5135358a7e00991f73b88cdc8eda5203bf9de22120d10a834c5761dbeb07dd13"}, + {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cff9ae76a54f7c6fe0167c9c4028c12c1f6de52d68a31d11b6790bb2ae685560"}, + {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f02576a4d106d7830ca90278868bf0983554dd69183b7bbe09f2fcd51cf54f"}, + {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:073d57425a23721730d3ff5485e2da489dd3c90b04e86243dd7211f889898106"}, + {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:71e32ddec7f573a1a0feb8d2cf2af474c50ec21e7a8263026e8d3b4b629805db"}, + {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7cbb20b29d57e76a58b40fd8b13a9130db495a12d678d651b459bf61c0714cea"}, + {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:a42ad267e131d7847076bb7e31050f6c4378cd38e8f1bf7a0edd32f30224d5c9"}, + {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a013979fc9390abadc445ea2527426a0e7a4495c19b74589204f9b71bcaafeb"}, + {file = "mmh3-4.1.0-cp311-cp311-win32.whl", hash = "sha256:1d3b1cdad7c71b7b88966301789a478af142bddcb3a2bee563f7a7d40519a00f"}, + {file = "mmh3-4.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0dc6dc32eb03727467da8e17deffe004fbb65e8b5ee2b502d36250d7a3f4e2ec"}, + {file = "mmh3-4.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:9ae3a5c1b32dda121c7dc26f9597ef7b01b4c56a98319a7fe86c35b8bc459ae6"}, + {file = "mmh3-4.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0033d60c7939168ef65ddc396611077a7268bde024f2c23bdc283a19123f9e9c"}, + {file = "mmh3-4.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d6af3e2287644b2b08b5924ed3a88c97b87b44ad08e79ca9f93d3470a54a41c5"}, + {file = "mmh3-4.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d82eb4defa245e02bb0b0dc4f1e7ee284f8d212633389c91f7fba99ba993f0a2"}, + {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba245e94b8d54765e14c2d7b6214e832557e7856d5183bc522e17884cab2f45d"}, + {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb04e2feeabaad6231e89cd43b3d01a4403579aa792c9ab6fdeef45cc58d4ec0"}, + {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e3b1a27def545ce11e36158ba5d5390cdbc300cfe456a942cc89d649cf7e3b2"}, + {file = 
"mmh3-4.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce0ab79ff736d7044e5e9b3bfe73958a55f79a4ae672e6213e92492ad5e734d5"}, + {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b02268be6e0a8eeb8a924d7db85f28e47344f35c438c1e149878bb1c47b1cd3"}, + {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:deb887f5fcdaf57cf646b1e062d56b06ef2f23421c80885fce18b37143cba828"}, + {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:99dd564e9e2b512eb117bd0cbf0f79a50c45d961c2a02402787d581cec5448d5"}, + {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:08373082dfaa38fe97aa78753d1efd21a1969e51079056ff552e687764eafdfe"}, + {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:54b9c6a2ea571b714e4fe28d3e4e2db37abfd03c787a58074ea21ee9a8fd1740"}, + {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a7b1edf24c69e3513f879722b97ca85e52f9032f24a52284746877f6a7304086"}, + {file = "mmh3-4.1.0-cp312-cp312-win32.whl", hash = "sha256:411da64b951f635e1e2284b71d81a5a83580cea24994b328f8910d40bed67276"}, + {file = "mmh3-4.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:bebc3ecb6ba18292e3d40c8712482b4477abd6981c2ebf0e60869bd90f8ac3a9"}, + {file = "mmh3-4.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:168473dd608ade6a8d2ba069600b35199a9af837d96177d3088ca91f2b3798e3"}, + {file = "mmh3-4.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:372f4b7e1dcde175507640679a2a8790185bb71f3640fc28a4690f73da986a3b"}, + {file = "mmh3-4.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:438584b97f6fe13e944faf590c90fc127682b57ae969f73334040d9fa1c7ffa5"}, + {file = "mmh3-4.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6e27931b232fc676675fac8641c6ec6b596daa64d82170e8597f5a5b8bdcd3b6"}, + {file = 
"mmh3-4.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:571a92bad859d7b0330e47cfd1850b76c39b615a8d8e7aa5853c1f971fd0c4b1"}, + {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a69d6afe3190fa08f9e3a58e5145549f71f1f3fff27bd0800313426929c7068"}, + {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afb127be0be946b7630220908dbea0cee0d9d3c583fa9114a07156f98566dc28"}, + {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:940d86522f36348ef1a494cbf7248ab3f4a1638b84b59e6c9e90408bd11ad729"}, + {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3dcccc4935686619a8e3d1f7b6e97e3bd89a4a796247930ee97d35ea1a39341"}, + {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01bb9b90d61854dfc2407c5e5192bfb47222d74f29d140cb2dd2a69f2353f7cc"}, + {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:bcb1b8b951a2c0b0fb8a5426c62a22557e2ffc52539e0a7cc46eb667b5d606a9"}, + {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6477a05d5e5ab3168e82e8b106e316210ac954134f46ec529356607900aea82a"}, + {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:da5892287e5bea6977364b15712a2573c16d134bc5fdcdd4cf460006cf849278"}, + {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:99180d7fd2327a6fffbaff270f760576839dc6ee66d045fa3a450f3490fda7f5"}, + {file = "mmh3-4.1.0-cp38-cp38-win32.whl", hash = "sha256:9b0d4f3949913a9f9a8fb1bb4cc6ecd52879730aab5ff8c5a3d8f5b593594b73"}, + {file = "mmh3-4.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:598c352da1d945108aee0c3c3cfdd0e9b3edef74108f53b49d481d3990402169"}, + {file = "mmh3-4.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:475d6d1445dd080f18f0f766277e1237fa2914e5fe3307a3b2a3044f30892103"}, + 
{file = "mmh3-4.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5ca07c41e6a2880991431ac717c2a049056fff497651a76e26fc22224e8b5732"}, + {file = "mmh3-4.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0ebe052fef4bbe30c0548d12ee46d09f1b69035ca5208a7075e55adfe091be44"}, + {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eaefd42e85afb70f2b855a011f7b4d8a3c7e19c3f2681fa13118e4d8627378c5"}, + {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0ae43caae5a47afe1b63a1ae3f0986dde54b5fb2d6c29786adbfb8edc9edfb"}, + {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6218666f74c8c013c221e7f5f8a693ac9cf68e5ac9a03f2373b32d77c48904de"}, + {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac59294a536ba447b5037f62d8367d7d93b696f80671c2c45645fa9f1109413c"}, + {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:086844830fcd1e5c84fec7017ea1ee8491487cfc877847d96f86f68881569d2e"}, + {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e42b38fad664f56f77f6fbca22d08450f2464baa68acdbf24841bf900eb98e87"}, + {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d08b790a63a9a1cde3b5d7d733ed97d4eb884bfbc92f075a091652d6bfd7709a"}, + {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:73ea4cc55e8aea28c86799ecacebca09e5f86500414870a8abaedfcbaf74d288"}, + {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:f90938ff137130e47bcec8dc1f4ceb02f10178c766e2ef58a9f657ff1f62d124"}, + {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:aa1f13e94b8631c8cd53259250556edcf1de71738936b60febba95750d9632bd"}, + {file = "mmh3-4.1.0-cp39-cp39-win32.whl", hash = 
"sha256:a3b680b471c181490cf82da2142029edb4298e1bdfcb67c76922dedef789868d"}, + {file = "mmh3-4.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:fefef92e9c544a8dbc08f77a8d1b6d48006a750c4375bbcd5ff8199d761e263b"}, + {file = "mmh3-4.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:8e2c1f6a2b41723a4f82bd5a762a777836d29d664fc0095f17910bea0adfd4a6"}, + {file = "mmh3-4.1.0.tar.gz", hash = "sha256:a1cf25348b9acd229dda464a094d6170f47d2850a1fcb762a3b6172d2ce6ca4a"}, +] + +[package.extras] +test = ["mypy (>=1.0)", "pytest (>=7.0.0)"] + +[[package]] +name = "monotonic" +version = "1.6" +description = "An implementation of time.monotonic() for Python 2 & < 3.3" +optional = false +python-versions = "*" +files = [ + {file = "monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c"}, + {file = "monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7"}, +] + +[[package]] +name = "mpmath" +version = "1.3.0" +description = "Python library for arbitrary-precision floating-point arithmetic" +optional = false +python-versions = "*" +files = [ + {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, + {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, +] + +[package.extras] +develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] +docs = ["sphinx"] +gmpy = ["gmpy2 (>=2.1.0a4)"] +tests = ["pytest (>=4.6)"] + +[[package]] +name = "multidict" +version = "6.0.4" +description = "multidict implementation" +optional = false +python-versions = ">=3.7" +files = [ + {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, + {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, + {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, + {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, + {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, + {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, + {file = 
"multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, + {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, + {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = 
"sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, + {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, + {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, + {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, + {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, + {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, + {file = 
"multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, + {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, + {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, + {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, + {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, + {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, + {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, + {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, + {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, + {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, + {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, + {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, + {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, + {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, + {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, +] + +[[package]] +name = "murmurhash" +version = "1.0.10" +description = "Cython bindings for MurmurHash" +optional = false +python-versions = ">=3.6" +files = [ + {file = "murmurhash-1.0.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3e90eef568adca5e17a91f96975e9a782ace3a617bbb3f8c8c2d917096e9bfeb"}, + {file = "murmurhash-1.0.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f8ecb00cc1ab57e4b065f9fb3ea923b55160c402d959c69a0b6dbbe8bc73efc3"}, + {file = "murmurhash-1.0.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:3310101004d9e2e0530c2fed30174448d998ffd1b50dcbfb7677e95db101aa4b"}, + {file = "murmurhash-1.0.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65401a6f1778676253cbf89c1f45a8a7feb7d73038e483925df7d5943c08ed9"}, + {file = "murmurhash-1.0.10-cp310-cp310-win_amd64.whl", hash = "sha256:f23f2dfc7174de2cdc5007c0771ab8376a2a3f48247f32cac4a5563e40c6adcc"}, + {file = "murmurhash-1.0.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:90ed37ee2cace9381b83d56068334f77e3e30bc521169a1f886a2a2800e965d6"}, + {file = "murmurhash-1.0.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:22e9926fdbec9d24ced9b0a42f0fee68c730438be3cfb00c2499fd495caec226"}, + {file = "murmurhash-1.0.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54bfbfd68baa99717239b8844600db627f336a08b1caf4df89762999f681cdd1"}, + {file = "murmurhash-1.0.10-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18b9d200a09d48ef67f6840b77c14f151f2b6c48fd69661eb75c7276ebdb146c"}, + {file = "murmurhash-1.0.10-cp311-cp311-win_amd64.whl", hash = "sha256:e5d7cfe392c0a28129226271008e61e77bf307afc24abf34f386771daa7b28b0"}, + {file = "murmurhash-1.0.10-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:96f0a070344d4802ea76a160e0d4c88b7dc10454d2426f48814482ba60b38b9e"}, + {file = "murmurhash-1.0.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9f61862060d677c84556610ac0300a0776cb13cb3155f5075ed97e80f86e55d9"}, + {file = "murmurhash-1.0.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3b6d2d877d8881a08be66d906856d05944be0faf22b9a0390338bcf45299989"}, + {file = "murmurhash-1.0.10-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f54b0031d8696fed17ed6e9628f339cdea0ba2367ca051e18ff59193f52687"}, + {file = "murmurhash-1.0.10-cp312-cp312-win_amd64.whl", hash = 
"sha256:97e09d675de2359e586f09de1d0de1ab39f9911edffc65c9255fb5e04f7c1f85"}, + {file = "murmurhash-1.0.10-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b64e5332932993fef598e78d633b1ba664789ab73032ed511f3dc615a631a1a"}, + {file = "murmurhash-1.0.10-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e2a38437a8497e082408aa015c6d90554b9e00c2c221fdfa79728a2d99a739e"}, + {file = "murmurhash-1.0.10-cp36-cp36m-win_amd64.whl", hash = "sha256:55f4e4f9291a53c36070330950b472d72ba7d331e4ce3ce1ab349a4f458f7bc4"}, + {file = "murmurhash-1.0.10-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:16ef9f0855952493fe08929d23865425906a8c0c40607ac8a949a378652ba6a9"}, + {file = "murmurhash-1.0.10-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cc3351ae92b89c2fcdc6e41ac6f17176dbd9b3554c96109fd0713695d8663e7"}, + {file = "murmurhash-1.0.10-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6559fef7c2e7349a42a63549067709b656d6d1580752bd76be1541d8b2d65718"}, + {file = "murmurhash-1.0.10-cp37-cp37m-win_amd64.whl", hash = "sha256:8bf49e3bb33febb7057ae3a5d284ef81243a1e55eaa62bdcd79007cddbdc0461"}, + {file = "murmurhash-1.0.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f1605fde07030516eb63d77a598dd164fb9bf217fd937dbac588fe7e47a28c40"}, + {file = "murmurhash-1.0.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4904f7e68674a64eb2b08823c72015a5e14653e0b4b109ea00c652a005a59bad"}, + {file = "murmurhash-1.0.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0438f0cb44cf1cd26251f72c1428213c4197d40a4e3f48b1efc3aea12ce18517"}, + {file = "murmurhash-1.0.10-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db1171a3f9a10571931764cdbfaa5371f4cf5c23c680639762125cb075b833a5"}, + {file = 
"murmurhash-1.0.10-cp38-cp38-win_amd64.whl", hash = "sha256:1c9fbcd7646ad8ba67b895f71d361d232c6765754370ecea473dd97d77afe99f"}, + {file = "murmurhash-1.0.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7024ab3498434f22f8e642ae31448322ad8228c65c8d9e5dc2d563d57c14c9b8"}, + {file = "murmurhash-1.0.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a99dedfb7f0cc5a4cd76eb409ee98d3d50eba024f934e705914f6f4d765aef2c"}, + {file = "murmurhash-1.0.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b580b8503647de5dd7972746b7613ea586270f17ac92a44872a9b1b52c36d68"}, + {file = "murmurhash-1.0.10-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75840212bf75eb1352c946c3cf1622dacddd6d6bdda34368237d1eb3568f23a"}, + {file = "murmurhash-1.0.10-cp39-cp39-win_amd64.whl", hash = "sha256:a4209962b9f85de397c3203ea4b3a554da01ae9fd220fdab38757d4e9eba8d1a"}, + {file = "murmurhash-1.0.10.tar.gz", hash = "sha256:5282aab1317804c6ebd6dd7f69f15ba9075aee671c44a34be2bde0f1b11ef88a"}, +] + +[[package]] +name = "mypy" +version = "1.8.0" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3"}, + {file = "mypy-1.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4"}, + {file = "mypy-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d"}, + {file = "mypy-1.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9"}, + {file = "mypy-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410"}, + {file = 
"mypy-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:855fe27b80375e5c5878492f0729540db47b186509c98dae341254c8f45f42ae"}, + {file = "mypy-1.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c886c6cce2d070bd7df4ec4a05a13ee20c0aa60cb587e8d1265b6c03cf91da3"}, + {file = "mypy-1.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d19c413b3c07cbecf1f991e2221746b0d2a9410b59cb3f4fb9557f0365a1a817"}, + {file = "mypy-1.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9261ed810972061388918c83c3f5cd46079d875026ba97380f3e3978a72f503d"}, + {file = "mypy-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:51720c776d148bad2372ca21ca29256ed483aa9a4cdefefcef49006dff2a6835"}, + {file = "mypy-1.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:52825b01f5c4c1c4eb0db253ec09c7aa17e1a7304d247c48b6f3599ef40db8bd"}, + {file = "mypy-1.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f5ac9a4eeb1ec0f1ccdc6f326bcdb464de5f80eb07fb38b5ddd7b0de6bc61e55"}, + {file = "mypy-1.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afe3fe972c645b4632c563d3f3eff1cdca2fa058f730df2b93a35e3b0c538218"}, + {file = "mypy-1.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:42c6680d256ab35637ef88891c6bd02514ccb7e1122133ac96055ff458f93fc3"}, + {file = "mypy-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:720a5ca70e136b675af3af63db533c1c8c9181314d207568bbe79051f122669e"}, + {file = "mypy-1.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6"}, + {file = "mypy-1.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66"}, + {file = "mypy-1.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6"}, + {file = "mypy-1.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d"}, + {file = "mypy-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02"}, + {file = "mypy-1.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8"}, + {file = "mypy-1.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259"}, + {file = "mypy-1.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b"}, + {file = "mypy-1.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592"}, + {file = "mypy-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a"}, + {file = "mypy-1.8.0-py3-none-any.whl", hash = "sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d"}, + {file = "mypy-1.8.0.tar.gz", hash = "sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07"}, +] + +[package.dependencies] +mypy-extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = ">=4.1.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-boto3-s3" +version = "1.34.14" +description = "Type annotations for boto3.S3 1.34.14 service generated with mypy-boto3-builder 7.21.0" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy-boto3-s3-1.34.14.tar.gz", hash = "sha256:71c39ab0623cdb442d225b71c1783f6a513cff4c4a13505a2efbb2e3aff2e965"}, + {file = "mypy_boto3_s3-1.34.14-py3-none-any.whl", hash = "sha256:f9669ecd182d5bf3532f5f2dcc5e5237776afe157ad5a0b37b26d6bec5fcc432"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", 
markers = "python_version < \"3.12\""} + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "networkx" +version = "3.2.1" +description = "Python package for creating and manipulating graphs and networks" +optional = true +python-versions = ">=3.9" +files = [ + {file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"}, + {file = "networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6"}, +] + +[package.extras] +default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] + +[[package]] +name = "nodeenv" +version = "1.8.0" +description = "Node.js virtual environment builder" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +files = [ + {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"}, + {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"}, +] + +[package.dependencies] +setuptools = "*" + +[[package]] 
+name = "numpy" +version = "1.26.3" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numpy-1.26.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:806dd64230dbbfaca8a27faa64e2f414bf1c6622ab78cc4264f7f5f028fee3bf"}, + {file = "numpy-1.26.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02f98011ba4ab17f46f80f7f8f1c291ee7d855fcef0a5a98db80767a468c85cd"}, + {file = "numpy-1.26.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d45b3ec2faed4baca41c76617fcdcfa4f684ff7a151ce6fc78ad3b6e85af0a6"}, + {file = "numpy-1.26.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdd2b45bf079d9ad90377048e2747a0c82351989a2165821f0c96831b4a2a54b"}, + {file = "numpy-1.26.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:211ddd1e94817ed2d175b60b6374120244a4dd2287f4ece45d49228b4d529178"}, + {file = "numpy-1.26.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b1240f767f69d7c4c8a29adde2310b871153df9b26b5cb2b54a561ac85146485"}, + {file = "numpy-1.26.3-cp310-cp310-win32.whl", hash = "sha256:21a9484e75ad018974a2fdaa216524d64ed4212e418e0a551a2d83403b0531d3"}, + {file = "numpy-1.26.3-cp310-cp310-win_amd64.whl", hash = "sha256:9e1591f6ae98bcfac2a4bbf9221c0b92ab49762228f38287f6eeb5f3f55905ce"}, + {file = "numpy-1.26.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b831295e5472954104ecb46cd98c08b98b49c69fdb7040483aff799a755a7374"}, + {file = "numpy-1.26.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9e87562b91f68dd8b1c39149d0323b42e0082db7ddb8e934ab4c292094d575d6"}, + {file = "numpy-1.26.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c66d6fec467e8c0f975818c1796d25c53521124b7cfb760114be0abad53a0a2"}, + {file = "numpy-1.26.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f25e2811a9c932e43943a2615e65fc487a0b6b49218899e62e426e7f0a57eeda"}, + {file = 
"numpy-1.26.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:af36e0aa45e25c9f57bf684b1175e59ea05d9a7d3e8e87b7ae1a1da246f2767e"}, + {file = "numpy-1.26.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:51c7f1b344f302067b02e0f5b5d2daa9ed4a721cf49f070280ac202738ea7f00"}, + {file = "numpy-1.26.3-cp311-cp311-win32.whl", hash = "sha256:7ca4f24341df071877849eb2034948459ce3a07915c2734f1abb4018d9c49d7b"}, + {file = "numpy-1.26.3-cp311-cp311-win_amd64.whl", hash = "sha256:39763aee6dfdd4878032361b30b2b12593fb445ddb66bbac802e2113eb8a6ac4"}, + {file = "numpy-1.26.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a7081fd19a6d573e1a05e600c82a1c421011db7935ed0d5c483e9dd96b99cf13"}, + {file = "numpy-1.26.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12c70ac274b32bc00c7f61b515126c9205323703abb99cd41836e8125ea0043e"}, + {file = "numpy-1.26.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f784e13e598e9594750b2ef6729bcd5a47f6cfe4a12cca13def35e06d8163e3"}, + {file = "numpy-1.26.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f24750ef94d56ce6e33e4019a8a4d68cfdb1ef661a52cdaee628a56d2437419"}, + {file = "numpy-1.26.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:77810ef29e0fb1d289d225cabb9ee6cf4d11978a00bb99f7f8ec2132a84e0166"}, + {file = "numpy-1.26.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8ed07a90f5450d99dad60d3799f9c03c6566709bd53b497eb9ccad9a55867f36"}, + {file = "numpy-1.26.3-cp312-cp312-win32.whl", hash = "sha256:f73497e8c38295aaa4741bdfa4fda1a5aedda5473074369eca10626835445511"}, + {file = "numpy-1.26.3-cp312-cp312-win_amd64.whl", hash = "sha256:da4b0c6c699a0ad73c810736303f7fbae483bcb012e38d7eb06a5e3b432c981b"}, + {file = "numpy-1.26.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1666f634cb3c80ccbd77ec97bc17337718f56d6658acf5d3b906ca03e90ce87f"}, + {file = "numpy-1.26.3-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:18c3319a7d39b2c6a9e3bb75aab2304ab79a811ac0168a671a62e6346c29b03f"}, + {file = "numpy-1.26.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b7e807d6888da0db6e7e75838444d62495e2b588b99e90dd80c3459594e857b"}, + {file = "numpy-1.26.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4d362e17bcb0011738c2d83e0a65ea8ce627057b2fdda37678f4374a382a137"}, + {file = "numpy-1.26.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b8c275f0ae90069496068c714387b4a0eba5d531aace269559ff2b43655edd58"}, + {file = "numpy-1.26.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cc0743f0302b94f397a4a65a660d4cd24267439eb16493fb3caad2e4389bccbb"}, + {file = "numpy-1.26.3-cp39-cp39-win32.whl", hash = "sha256:9bc6d1a7f8cedd519c4b7b1156d98e051b726bf160715b769106661d567b3f03"}, + {file = "numpy-1.26.3-cp39-cp39-win_amd64.whl", hash = "sha256:867e3644e208c8922a3be26fc6bbf112a035f50f0a86497f98f228c50c607bb2"}, + {file = "numpy-1.26.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3c67423b3703f8fbd90f5adaa37f85b5794d3366948efe9a5190a5f3a83fc34e"}, + {file = "numpy-1.26.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46f47ee566d98849323f01b349d58f2557f02167ee301e5e28809a8c0e27a2d0"}, + {file = "numpy-1.26.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a8474703bffc65ca15853d5fd4d06b18138ae90c17c8d12169968e998e448bb5"}, + {file = "numpy-1.26.3.tar.gz", hash = "sha256:697df43e2b6310ecc9d95f05d5ef20eacc09c7c4ecc9da3f235d39e71b7da1e4"}, +] + +[[package]] +name = "oauthlib" +version = "3.2.2" +description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" +optional = false +python-versions = ">=3.6" +files = [ + {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, + {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, 
+] + +[package.extras] +rsa = ["cryptography (>=3.0.0)"] +signals = ["blinker (>=1.4.0)"] +signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] + +[[package]] +name = "onnxruntime" +version = "1.17.1" +description = "ONNX Runtime is a runtime accelerator for Machine Learning models" +optional = false +python-versions = "*" +files = [ + {file = "onnxruntime-1.17.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:d43ac17ac4fa3c9096ad3c0e5255bb41fd134560212dc124e7f52c3159af5d21"}, + {file = "onnxruntime-1.17.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55b5e92a4c76a23981c998078b9bf6145e4fb0b016321a8274b1607bd3c6bd35"}, + {file = "onnxruntime-1.17.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ebbcd2bc3a066cf54e6f18c75708eb4d309ef42be54606d22e5bdd78afc5b0d7"}, + {file = "onnxruntime-1.17.1-cp310-cp310-win32.whl", hash = "sha256:5e3716b5eec9092e29a8d17aab55e737480487deabfca7eac3cd3ed952b6ada9"}, + {file = "onnxruntime-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:fbb98cced6782ae1bb799cc74ddcbbeeae8819f3ad1d942a74d88e72b6511337"}, + {file = "onnxruntime-1.17.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:36fd6f87a1ecad87e9c652e42407a50fb305374f9a31d71293eb231caae18784"}, + {file = "onnxruntime-1.17.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99a8bddeb538edabc524d468edb60ad4722cff8a49d66f4e280c39eace70500b"}, + {file = "onnxruntime-1.17.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd7fddb4311deb5a7d3390cd8e9b3912d4d963efbe4dfe075edbaf18d01c024e"}, + {file = "onnxruntime-1.17.1-cp311-cp311-win32.whl", hash = "sha256:606a7cbfb6680202b0e4f1890881041ffc3ac6e41760a25763bd9fe146f0b335"}, + {file = "onnxruntime-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:53e4e06c0a541696ebdf96085fd9390304b7b04b748a19e02cf3b35c869a1e76"}, + {file = "onnxruntime-1.17.1-cp312-cp312-macosx_11_0_universal2.whl", hash = 
"sha256:40f08e378e0f85929712a2b2c9b9a9cc400a90c8a8ca741d1d92c00abec60843"}, + {file = "onnxruntime-1.17.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ac79da6d3e1bb4590f1dad4bb3c2979d7228555f92bb39820889af8b8e6bd472"}, + {file = "onnxruntime-1.17.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ae9ba47dc099004e3781f2d0814ad710a13c868c739ab086fc697524061695ea"}, + {file = "onnxruntime-1.17.1-cp312-cp312-win32.whl", hash = "sha256:2dff1a24354220ac30e4a4ce2fb1df38cb1ea59f7dac2c116238d63fe7f4c5ff"}, + {file = "onnxruntime-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:6226a5201ab8cafb15e12e72ff2a4fc8f50654e8fa5737c6f0bd57c5ff66827e"}, + {file = "onnxruntime-1.17.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:cd0c07c0d1dfb8629e820b05fda5739e4835b3b82faf43753d2998edf2cf00aa"}, + {file = "onnxruntime-1.17.1-cp38-cp38-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:617ebdf49184efa1ba6e4467e602fbfa029ed52c92f13ce3c9f417d303006381"}, + {file = "onnxruntime-1.17.1-cp38-cp38-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9dae9071e3facdf2920769dceee03b71c684b6439021defa45b830d05e148924"}, + {file = "onnxruntime-1.17.1-cp38-cp38-win32.whl", hash = "sha256:835d38fa1064841679433b1aa8138b5e1218ddf0cfa7a3ae0d056d8fd9cec713"}, + {file = "onnxruntime-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:96621e0c555c2453bf607606d08af3f70fbf6f315230c28ddea91754e17ad4e6"}, + {file = "onnxruntime-1.17.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:7a9539935fb2d78ebf2cf2693cad02d9930b0fb23cdd5cf37a7df813e977674d"}, + {file = "onnxruntime-1.17.1-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:45c6a384e9d9a29c78afff62032a46a993c477b280247a7e335df09372aedbe9"}, + {file = "onnxruntime-1.17.1-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4e19f966450f16863a1d6182a685ca33ae04d7772a76132303852d05b95411ea"}, + {file = 
"onnxruntime-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e2ae712d64a42aac29ed7a40a426cb1e624a08cfe9273dcfe681614aa65b07dc"}, + {file = "onnxruntime-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:f7e9f7fb049825cdddf4a923cfc7c649d84d63c0134315f8e0aa9e0c3004672c"}, +] + +[package.dependencies] +coloredlogs = "*" +flatbuffers = "*" +numpy = ">=1.21.6" +packaging = "*" +protobuf = "*" +sympy = "*" + +[[package]] +name = "openai" +version = "1.8.0" +description = "The official Python library for the openai API" +optional = false +python-versions = ">=3.7.1" +files = [ + {file = "openai-1.8.0-py3-none-any.whl", hash = "sha256:0f8f53805826103fdd8adaf379ad3ec23f9d867e698cbc14caf34b778d150175"}, + {file = "openai-1.8.0.tar.gz", hash = "sha256:93366be27802f517e89328801913d2a5ede45e3b86fdcab420385b8a1b88c767"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tqdm = ">4" +typing-extensions = ">=4.7,<5" + +[package.extras] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] + +[[package]] +name = "openapi-python-client" +version = "0.14.1" +description = "Generate modern Python clients from OpenAPI" +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "openapi_python_client-0.14.1-py3-none-any.whl", hash = "sha256:765d16b5566bb03941f871b8bf493e2a3191f4cc96e8c7b4f337bd584619b78d"}, + {file = "openapi_python_client-0.14.1.tar.gz", hash = "sha256:13574d2f3acbccea615364e8e49789829fce254d2123a0a9c0918f36b7368018"}, +] + +[package.dependencies] +attrs = ">=21.3.0" +autoflake = ">=1.4,<3.0.0" +black = ">=23" +colorama = {version = ">=0.4.3,<0.5.0", markers = "sys_platform == \"win32\""} +httpx = ">=0.15.4,<0.25.0" +isort = ">=5.0.5,<6.0.0" +jinja2 = ">=3.0.0,<4.0.0" +pydantic = ">=1.6.1,<2.0.0" +python-dateutil = ">=2.8.1,<3.0.0" +PyYAML = ">=6.0,<7.0" +shellingham = ">=1.3.2,<2.0.0" +typer = ">0.6,<0.10" + +[[package]] +name = "opentelemetry-api" 
+version = "1.22.0" +description = "OpenTelemetry Python API" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_api-1.22.0-py3-none-any.whl", hash = "sha256:43621514301a7e9f5d06dd8013a1b450f30c2e9372b8e30aaeb4562abf2ce034"}, + {file = "opentelemetry_api-1.22.0.tar.gz", hash = "sha256:15ae4ca925ecf9cfdfb7a709250846fbb08072260fca08ade78056c502b86bed"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +importlib-metadata = ">=6.0,<7.0" + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.22.0" +description = "OpenTelemetry Protobuf encoding" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_exporter_otlp_proto_common-1.22.0-py3-none-any.whl", hash = "sha256:3f2538bec5312587f8676c332b3747f54c89fe6364803a807e217af4603201fa"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.22.0.tar.gz", hash = "sha256:71ae2f81bc6d6fe408d06388826edc8933759b2ca3a97d24054507dc7cfce52d"}, +] + +[package.dependencies] +backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""} +opentelemetry-proto = "1.22.0" + +[[package]] +name = "opentelemetry-exporter-otlp-proto-grpc" +version = "1.22.0" +description = "OpenTelemetry Collector Protobuf over gRPC Exporter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_exporter_otlp_proto_grpc-1.22.0-py3-none-any.whl", hash = "sha256:b5bcadc129272004316a455e9081216d3380c1fc2231a928ea6a70aa90e173fb"}, + {file = "opentelemetry_exporter_otlp_proto_grpc-1.22.0.tar.gz", hash = "sha256:1e0e5aa4bbabc74942f06f268deffd94851d12a8dc30b02527472ef1729fe5b1"}, +] + +[package.dependencies] +backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""} +deprecated = ">=1.2.6" +googleapis-common-protos = ">=1.52,<2.0" +grpcio = ">=1.0.0,<2.0.0" +opentelemetry-api = ">=1.15,<2.0" +opentelemetry-exporter-otlp-proto-common = "1.22.0" +opentelemetry-proto = "1.22.0" +opentelemetry-sdk = ">=1.22.0,<1.23.0" + 
+[package.extras] +test = ["pytest-grpc"] + +[[package]] +name = "opentelemetry-instrumentation" +version = "0.43b0" +description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_instrumentation-0.43b0-py3-none-any.whl", hash = "sha256:0ff1334d7e359e27640e9d420024efeb73eacae464309c2e14ede7ba6c93967e"}, + {file = "opentelemetry_instrumentation-0.43b0.tar.gz", hash = "sha256:c3755da6c4be8033be0216d0501e11f4832690f4e2eca5a3576fbf113498f0f6"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.4,<2.0" +setuptools = ">=16.0" +wrapt = ">=1.0.0,<2.0.0" + +[[package]] +name = "opentelemetry-instrumentation-asgi" +version = "0.43b0" +description = "ASGI instrumentation for OpenTelemetry" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_instrumentation_asgi-0.43b0-py3-none-any.whl", hash = "sha256:1f593829fa039e9367820736fb063e92acd15c25b53d7bcb5d319971b8e93fd7"}, + {file = "opentelemetry_instrumentation_asgi-0.43b0.tar.gz", hash = "sha256:3f6f19333dca31ef696672e4e36cb1c2613c71dc7e847c11ff36a37e1130dadc"}, +] + +[package.dependencies] +asgiref = ">=3.0,<4.0" +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.43b0" +opentelemetry-semantic-conventions = "0.43b0" +opentelemetry-util-http = "0.43b0" + +[package.extras] +instruments = ["asgiref (>=3.0,<4.0)"] +test = ["opentelemetry-instrumentation-asgi[instruments]", "opentelemetry-test-utils (==0.43b0)"] + +[[package]] +name = "opentelemetry-instrumentation-fastapi" +version = "0.43b0" +description = "OpenTelemetry FastAPI Instrumentation" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_instrumentation_fastapi-0.43b0-py3-none-any.whl", hash = "sha256:b79c044df68a52e07b35fa12a424e7cc0dd27ff0a171c5fdcc41dea9de8fc938"}, + {file = "opentelemetry_instrumentation_fastapi-0.43b0.tar.gz", hash = 
"sha256:2afaaf470622e1a2732182c68f6d2431ffe5e026a7edacd0f83605632b66347f"}, +] + +[package.dependencies] +opentelemetry-api = ">=1.12,<2.0" +opentelemetry-instrumentation = "0.43b0" +opentelemetry-instrumentation-asgi = "0.43b0" +opentelemetry-semantic-conventions = "0.43b0" +opentelemetry-util-http = "0.43b0" + +[package.extras] +instruments = ["fastapi (>=0.58,<1.0)"] +test = ["httpx (>=0.22,<1.0)", "opentelemetry-instrumentation-fastapi[instruments]", "opentelemetry-test-utils (==0.43b0)", "requests (>=2.23,<3.0)"] + +[[package]] +name = "opentelemetry-proto" +version = "1.22.0" +description = "OpenTelemetry Python Proto" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_proto-1.22.0-py3-none-any.whl", hash = "sha256:ce7188d22c75b6d0fe53e7fb58501613d0feade5139538e79dedd9420610fa0c"}, + {file = "opentelemetry_proto-1.22.0.tar.gz", hash = "sha256:9ec29169286029f17ca34ec1f3455802ffb90131642d2f545ece9a63e8f69003"}, +] + +[package.dependencies] +protobuf = ">=3.19,<5.0" + +[[package]] +name = "opentelemetry-sdk" +version = "1.22.0" +description = "OpenTelemetry Python SDK" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_sdk-1.22.0-py3-none-any.whl", hash = "sha256:a730555713d7c8931657612a88a141e3a4fe6eb5523d9e2d5a8b1e673d76efa6"}, + {file = "opentelemetry_sdk-1.22.0.tar.gz", hash = "sha256:45267ac1f38a431fc2eb5d6e0c0d83afc0b78de57ac345488aa58c28c17991d0"}, +] + +[package.dependencies] +opentelemetry-api = "1.22.0" +opentelemetry-semantic-conventions = "0.43b0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.43b0" +description = "OpenTelemetry Semantic Conventions" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_semantic_conventions-0.43b0-py3-none-any.whl", hash = "sha256:291284d7c1bf15fdaddf309b3bd6d3b7ce12a253cec6d27144439819a15d8445"}, + {file = "opentelemetry_semantic_conventions-0.43b0.tar.gz", hash = 
"sha256:b9576fb890df479626fa624e88dde42d3d60b8b6c8ae1152ad157a8b97358635"}, +] + +[[package]] +name = "opentelemetry-util-http" +version = "0.43b0" +description = "Web util for OpenTelemetry" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_util_http-0.43b0-py3-none-any.whl", hash = "sha256:f25a820784b030f6cb86b3d76e5676c769b75ed3f55a210bcdae0a5e175ebadb"}, + {file = "opentelemetry_util_http-0.43b0.tar.gz", hash = "sha256:3ff6ab361dbe99fc81200d625603c0fb890c055c6e416a3e6d661ddf47a6c7f7"}, +] + +[[package]] +name = "orjson" +version = "3.10.1" +description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" +optional = false +python-versions = ">=3.8" +files = [ + {file = "orjson-3.10.1-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8ec2fc456d53ea4a47768f622bb709be68acd455b0c6be57e91462259741c4f3"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e900863691d327758be14e2a491931605bd0aded3a21beb6ce133889830b659"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab6ecbd6fe57785ebc86ee49e183f37d45f91b46fc601380c67c5c5e9c0014a2"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8af7c68b01b876335cccfb4eee0beef2b5b6eae1945d46a09a7c24c9faac7a77"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:915abfb2e528677b488a06eba173e9d7706a20fdfe9cdb15890b74ef9791b85e"}, + {file = "orjson-3.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe3fd4a36eff9c63d25503b439531d21828da9def0059c4f472e3845a081aa0b"}, + {file = "orjson-3.10.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d229564e72cfc062e6481a91977a5165c5a0fdce11ddc19ced8471847a67c517"}, + {file = "orjson-3.10.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:9e00495b18304173ac843b5c5fbea7b6f7968564d0d49bef06bfaeca4b656f4e"}, + {file = "orjson-3.10.1-cp310-none-win32.whl", hash = "sha256:fd78ec55179545c108174ba19c1795ced548d6cac4d80d014163033c047ca4ea"}, + {file = "orjson-3.10.1-cp310-none-win_amd64.whl", hash = "sha256:50ca42b40d5a442a9e22eece8cf42ba3d7cd4cd0f2f20184b4d7682894f05eec"}, + {file = "orjson-3.10.1-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b345a3d6953628df2f42502297f6c1e1b475cfbf6268013c94c5ac80e8abc04c"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caa7395ef51af4190d2c70a364e2f42138e0e5fcb4bc08bc9b76997659b27dab"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b01d701decd75ae092e5f36f7b88a1e7a1d3bb7c9b9d7694de850fb155578d5a"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b5028981ba393f443d8fed9049211b979cadc9d0afecf162832f5a5b152c6297"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31ff6a222ea362b87bf21ff619598a4dc1106aaafaea32b1c4876d692891ec27"}, + {file = "orjson-3.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e852a83d7803d3406135fb7a57cf0c1e4a3e73bac80ec621bd32f01c653849c5"}, + {file = "orjson-3.10.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2567bc928ed3c3fcd90998009e8835de7c7dc59aabcf764b8374d36044864f3b"}, + {file = "orjson-3.10.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4ce98cac60b7bb56457bdd2ed7f0d5d7f242d291fdc0ca566c83fa721b52e92d"}, + {file = "orjson-3.10.1-cp311-none-win32.whl", hash = "sha256:813905e111318acb356bb8029014c77b4c647f8b03f314e7b475bd9ce6d1a8ce"}, + {file = "orjson-3.10.1-cp311-none-win_amd64.whl", hash = "sha256:03a3ca0b3ed52bed1a869163a4284e8a7b0be6a0359d521e467cdef7e8e8a3ee"}, + {file = 
"orjson-3.10.1-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:f02c06cee680b1b3a8727ec26c36f4b3c0c9e2b26339d64471034d16f74f4ef5"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1aa2f127ac546e123283e437cc90b5ecce754a22306c7700b11035dad4ccf85"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2cf29b4b74f585225196944dffdebd549ad2af6da9e80db7115984103fb18a96"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1b130c20b116f413caf6059c651ad32215c28500dce9cd029a334a2d84aa66f"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d31f9a709e6114492136e87c7c6da5e21dfedebefa03af85f3ad72656c493ae9"}, + {file = "orjson-3.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d1d169461726f271ab31633cf0e7e7353417e16fb69256a4f8ecb3246a78d6e"}, + {file = "orjson-3.10.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:57c294d73825c6b7f30d11c9e5900cfec9a814893af7f14efbe06b8d0f25fba9"}, + {file = "orjson-3.10.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d7f11dbacfa9265ec76b4019efffabaabba7a7ebf14078f6b4df9b51c3c9a8ea"}, + {file = "orjson-3.10.1-cp312-none-win32.whl", hash = "sha256:d89e5ed68593226c31c76ab4de3e0d35c760bfd3fbf0a74c4b2be1383a1bf123"}, + {file = "orjson-3.10.1-cp312-none-win_amd64.whl", hash = "sha256:aa76c4fe147fd162107ce1692c39f7189180cfd3a27cfbc2ab5643422812da8e"}, + {file = "orjson-3.10.1-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a2c6a85c92d0e494c1ae117befc93cf8e7bca2075f7fe52e32698da650b2c6d1"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9813f43da955197d36a7365eb99bed42b83680801729ab2487fef305b9ced866"}, + {file = 
"orjson-3.10.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec917b768e2b34b7084cb6c68941f6de5812cc26c6f1a9fecb728e36a3deb9e8"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5252146b3172d75c8a6d27ebca59c9ee066ffc5a277050ccec24821e68742fdf"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:536429bb02791a199d976118b95014ad66f74c58b7644d21061c54ad284e00f4"}, + {file = "orjson-3.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7dfed3c3e9b9199fb9c3355b9c7e4649b65f639e50ddf50efdf86b45c6de04b5"}, + {file = "orjson-3.10.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2b230ec35f188f003f5b543644ae486b2998f6afa74ee3a98fc8ed2e45960afc"}, + {file = "orjson-3.10.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:01234249ba19c6ab1eb0b8be89f13ea21218b2d72d496ef085cfd37e1bae9dd8"}, + {file = "orjson-3.10.1-cp38-none-win32.whl", hash = "sha256:8a884fbf81a3cc22d264ba780920d4885442144e6acaa1411921260416ac9a54"}, + {file = "orjson-3.10.1-cp38-none-win_amd64.whl", hash = "sha256:dab5f802d52b182163f307d2b1f727d30b1762e1923c64c9c56dd853f9671a49"}, + {file = "orjson-3.10.1-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a51fd55d4486bc5293b7a400f9acd55a2dc3b5fc8420d5ffe9b1d6bb1a056a5e"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53521542a6db1411b3bfa1b24ddce18605a3abdc95a28a67b33f9145f26aa8f2"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:27d610df96ac18ace4931411d489637d20ab3b8f63562b0531bba16011998db0"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79244b1456e5846d44e9846534bd9e3206712936d026ea8e6a55a7374d2c0694"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:d751efaa8a49ae15cbebdda747a62a9ae521126e396fda8143858419f3b03610"}, + {file = "orjson-3.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27ff69c620a4fff33267df70cfd21e0097c2a14216e72943bd5414943e376d77"}, + {file = "orjson-3.10.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ebc58693464146506fde0c4eb1216ff6d4e40213e61f7d40e2f0dde9b2f21650"}, + {file = "orjson-3.10.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5be608c3972ed902e0143a5b8776d81ac1059436915d42defe5c6ae97b3137a4"}, + {file = "orjson-3.10.1-cp39-none-win32.whl", hash = "sha256:4ae10753e7511d359405aadcbf96556c86e9dbf3a948d26c2c9f9a150c52b091"}, + {file = "orjson-3.10.1-cp39-none-win_amd64.whl", hash = "sha256:fb5bc4caa2c192077fdb02dce4e5ef8639e7f20bec4e3a834346693907362932"}, + {file = "orjson-3.10.1.tar.gz", hash = "sha256:a883b28d73370df23ed995c466b4f6c708c1f7a9bdc400fe89165c96c7603204"}, +] + +[[package]] +name = "outcome" +version = "1.3.0.post0" +description = "Capture the outcome of Python function calls." +optional = false +python-versions = ">=3.7" +files = [ + {file = "outcome-1.3.0.post0-py2.py3-none-any.whl", hash = "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b"}, + {file = "outcome-1.3.0.post0.tar.gz", hash = "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8"}, +] + +[package.dependencies] +attrs = ">=19.2.0" + +[[package]] +name = "overrides" +version = "7.4.0" +description = "A decorator to automatically detect mismatch when overriding a method." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "overrides-7.4.0-py3-none-any.whl", hash = "sha256:3ad24583f86d6d7a49049695efe9933e67ba62f0c7625d53c59fa832ce4b8b7d"}, + {file = "overrides-7.4.0.tar.gz", hash = "sha256:9502a3cca51f4fac40b5feca985b6703a5c1f6ad815588a7ca9e285b9dca6757"}, +] + +[[package]] +name = "packaging" +version = "23.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, +] + +[[package]] +name = "pandas" +version = "2.1.4" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = true +python-versions = ">=3.9" +files = [ + {file = "pandas-2.1.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bdec823dc6ec53f7a6339a0e34c68b144a7a1fd28d80c260534c39c62c5bf8c9"}, + {file = "pandas-2.1.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:294d96cfaf28d688f30c918a765ea2ae2e0e71d3536754f4b6de0ea4a496d034"}, + {file = "pandas-2.1.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b728fb8deba8905b319f96447a27033969f3ea1fea09d07d296c9030ab2ed1d"}, + {file = "pandas-2.1.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00028e6737c594feac3c2df15636d73ace46b8314d236100b57ed7e4b9ebe8d9"}, + {file = "pandas-2.1.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:426dc0f1b187523c4db06f96fb5c8d1a845e259c99bda74f7de97bd8a3bb3139"}, + {file = "pandas-2.1.4-cp310-cp310-win_amd64.whl", hash = "sha256:f237e6ca6421265643608813ce9793610ad09b40154a3344a088159590469e46"}, + {file = "pandas-2.1.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b7d852d16c270e4331f6f59b3e9aa23f935f5c4b0ed2d0bc77637a8890a5d092"}, + {file = 
"pandas-2.1.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7d5f2f54f78164b3d7a40f33bf79a74cdee72c31affec86bfcabe7e0789821"}, + {file = "pandas-2.1.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0aa6e92e639da0d6e2017d9ccff563222f4eb31e4b2c3cf32a2a392fc3103c0d"}, + {file = "pandas-2.1.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d797591b6846b9db79e65dc2d0d48e61f7db8d10b2a9480b4e3faaddc421a171"}, + {file = "pandas-2.1.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d2d3e7b00f703aea3945995ee63375c61b2e6aa5aa7871c5d622870e5e137623"}, + {file = "pandas-2.1.4-cp311-cp311-win_amd64.whl", hash = "sha256:dc9bf7ade01143cddc0074aa6995edd05323974e6e40d9dbde081021ded8510e"}, + {file = "pandas-2.1.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:482d5076e1791777e1571f2e2d789e940dedd927325cc3cb6d0800c6304082f6"}, + {file = "pandas-2.1.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8a706cfe7955c4ca59af8c7a0517370eafbd98593155b48f10f9811da440248b"}, + {file = "pandas-2.1.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0513a132a15977b4a5b89aabd304647919bc2169eac4c8536afb29c07c23540"}, + {file = "pandas-2.1.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9f17f2b6fc076b2a0078862547595d66244db0f41bf79fc5f64a5c4d635bead"}, + {file = "pandas-2.1.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:45d63d2a9b1b37fa6c84a68ba2422dc9ed018bdaa668c7f47566a01188ceeec1"}, + {file = "pandas-2.1.4-cp312-cp312-win_amd64.whl", hash = "sha256:f69b0c9bb174a2342818d3e2778584e18c740d56857fc5cdb944ec8bbe4082cf"}, + {file = "pandas-2.1.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3f06bda01a143020bad20f7a85dd5f4a1600112145f126bc9e3e42077c24ef34"}, + {file = "pandas-2.1.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab5796839eb1fd62a39eec2916d3e979ec3130509930fea17fe6f81e18108f6a"}, + {file = 
"pandas-2.1.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbaf9e8d3a63a9276d707b4d25930a262341bca9874fcb22eff5e3da5394732"}, + {file = "pandas-2.1.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ebfd771110b50055712b3b711b51bee5d50135429364d0498e1213a7adc2be8"}, + {file = "pandas-2.1.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8ea107e0be2aba1da619cc6ba3f999b2bfc9669a83554b1904ce3dd9507f0860"}, + {file = "pandas-2.1.4-cp39-cp39-win_amd64.whl", hash = "sha256:d65148b14788b3758daf57bf42725caa536575da2b64df9964c563b015230984"}, + {file = "pandas-2.1.4.tar.gz", hash = "sha256:fcb68203c833cc735321512e13861358079a96c174a61f5116a1de89c58c0ef7"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.23.2,<2", markers = "python_version == \"3.11\""}, + {version = ">=1.26.0,<2", markers = "python_version >= \"3.12\""}, + {version = ">=1.22.4,<2", markers = "python_version < \"3.11\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.1" + +[package.extras] +all = ["PyQt5 (>=5.15.6)", "SQLAlchemy (>=1.4.36)", "beautifulsoup4 (>=4.11.1)", "bottleneck (>=1.3.4)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=0.8.1)", "fsspec (>=2022.05.0)", "gcsfs (>=2022.05.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.8.0)", "matplotlib (>=3.6.1)", "numba (>=0.55.2)", "numexpr (>=2.8.0)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pandas-gbq (>=0.17.5)", "psycopg2 (>=2.9.3)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.5)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "pyxlsb (>=1.0.9)", "qtpy (>=2.2.0)", "s3fs (>=2022.05.0)", "scipy (>=1.8.1)", "tables (>=3.7.0)", "tabulate (>=0.8.10)", "xarray (>=2022.03.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)", "zstandard (>=0.17.0)"] +aws = ["s3fs (>=2022.05.0)"] +clipboard = ["PyQt5 (>=5.15.6)", "qtpy (>=2.2.0)"] +compression = ["zstandard (>=0.17.0)"] +computation = ["scipy (>=1.8.1)", "xarray 
(>=2022.03.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pyxlsb (>=1.0.9)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)"] +feather = ["pyarrow (>=7.0.0)"] +fss = ["fsspec (>=2022.05.0)"] +gcp = ["gcsfs (>=2022.05.0)", "pandas-gbq (>=0.17.5)"] +hdf5 = ["tables (>=3.7.0)"] +html = ["beautifulsoup4 (>=4.11.1)", "html5lib (>=1.1)", "lxml (>=4.8.0)"] +mysql = ["SQLAlchemy (>=1.4.36)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.8.10)"] +parquet = ["pyarrow (>=7.0.0)"] +performance = ["bottleneck (>=1.3.4)", "numba (>=0.55.2)", "numexpr (>=2.8.0)"] +plot = ["matplotlib (>=3.6.1)"] +postgresql = ["SQLAlchemy (>=1.4.36)", "psycopg2 (>=2.9.3)"] +spss = ["pyreadstat (>=1.1.5)"] +sql-other = ["SQLAlchemy (>=1.4.36)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.8.0)"] + +[[package]] +name = "parso" +version = "0.8.3" +description = "A Python Parser" +optional = true +python-versions = ">=3.6" +files = [ + {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, + {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, +] + +[package.extras] +qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] +testing = ["docopt", "pytest (<6.0.0)"] + +[[package]] +name = "pathlib-abc" +version = "0.1.1" +description = "Backport of pathlib ABCs" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathlib_abc-0.1.1-py3-none-any.whl", hash = "sha256:5cb2f942fdbb75ce0e00ed3b94de9695b0c5d4b419b5004a702da80f165b3109"}, + {file = "pathlib_abc-0.1.1.tar.gz", hash = "sha256:084e7bdd919b0f7774914a9e64cd86a1b39860f81f781dd724258631f2915abe"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "pathy" +version = "0.11.0" +description = "pathlib.Path subclasses for local and cloud bucket storage" +optional = false +python-versions = ">= 3.8" +files = [ + {file = "pathy-0.11.0-py3-none-any.whl", hash = "sha256:5027f44744cdcd6b6ffd0b0570133dc1bc4af4b87a4f574ecdd810552b1a9fb0"}, + {file = "pathy-0.11.0.tar.gz", hash = "sha256:bb3d0e6b0b8bf76ef4f63c7191e96e0af2ed65c8fdb5fa17488f9c879e63706d"}, +] + +[package.dependencies] +pathlib-abc = "0.1.1" +smart-open = ">=5.2.1,<7.0.0" +typer = ">=0.3.0,<1.0.0" + +[package.extras] +all = ["azure-storage-blob", "boto3", "google-cloud-storage (>=1.26.0,<2.0.0)", "mock", "pytest", "pytest-coverage", "typer-cli"] +azure = ["azure-storage-blob"] +gcs = ["google-cloud-storage (>=1.26.0,<2.0.0)"] +s3 = ["boto3"] +test = ["mock", "pytest", "pytest-coverage", "typer-cli"] + +[[package]] +name = "pexpect" +version = "4.9.0" +description = "Pexpect allows easy control of interactive console applications." 
+optional = true +python-versions = "*" +files = [ + {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, + {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + +[[package]] +name = "pillow" +version = "10.2.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pillow-10.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:7823bdd049099efa16e4246bdf15e5a13dbb18a51b68fa06d6c1d4d8b99a796e"}, + {file = "pillow-10.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:83b2021f2ade7d1ed556bc50a399127d7fb245e725aa0113ebd05cfe88aaf588"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fad5ff2f13d69b7e74ce5b4ecd12cc0ec530fcee76356cac6742785ff71c452"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da2b52b37dad6d9ec64e653637a096905b258d2fc2b984c41ae7d08b938a67e4"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:47c0995fc4e7f79b5cfcab1fc437ff2890b770440f7696a3ba065ee0fd496563"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:322bdf3c9b556e9ffb18f93462e5f749d3444ce081290352c6070d014c93feb2"}, + {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51f1a1bffc50e2e9492e87d8e09a17c5eea8409cda8d3f277eb6edc82813c17c"}, + {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69ffdd6120a4737710a9eee73e1d2e37db89b620f702754b8f6e62594471dee0"}, + {file = "pillow-10.2.0-cp310-cp310-win32.whl", hash = "sha256:c6dafac9e0f2b3c78df97e79af707cdc5ef8e88208d686a4847bab8266870023"}, + {file = "pillow-10.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:aebb6044806f2e16ecc07b2a2637ee1ef67a11840a66752751714a0d924adf72"}, + 
{file = "pillow-10.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:7049e301399273a0136ff39b84c3678e314f2158f50f517bc50285fb5ec847ad"}, + {file = "pillow-10.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35bb52c37f256f662abdfa49d2dfa6ce5d93281d323a9af377a120e89a9eafb5"}, + {file = "pillow-10.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c23f307202661071d94b5e384e1e1dc7dfb972a28a2310e4ee16103e66ddb67"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:773efe0603db30c281521a7c0214cad7836c03b8ccff897beae9b47c0b657d61"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11fa2e5984b949b0dd6d7a94d967743d87c577ff0b83392f17cb3990d0d2fd6e"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:716d30ed977be8b37d3ef185fecb9e5a1d62d110dfbdcd1e2a122ab46fddb03f"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a086c2af425c5f62a65e12fbf385f7c9fcb8f107d0849dba5839461a129cf311"}, + {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c8de2789052ed501dd829e9cae8d3dcce7acb4777ea4a479c14521c942d395b1"}, + {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:609448742444d9290fd687940ac0b57fb35e6fd92bdb65386e08e99af60bf757"}, + {file = "pillow-10.2.0-cp311-cp311-win32.whl", hash = "sha256:823ef7a27cf86df6597fa0671066c1b596f69eba53efa3d1e1cb8b30f3533068"}, + {file = "pillow-10.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:1da3b2703afd040cf65ec97efea81cfba59cdbed9c11d8efc5ab09df9509fc56"}, + {file = "pillow-10.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:edca80cbfb2b68d7b56930b84a0e45ae1694aeba0541f798e908a49d66b837f1"}, + {file = "pillow-10.2.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:1b5e1b74d1bd1b78bc3477528919414874748dd363e6272efd5abf7654e68bef"}, + {file = "pillow-10.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:0eae2073305f451d8ecacb5474997c08569fb4eb4ac231ffa4ad7d342fdc25ac"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7c2286c23cd350b80d2fc9d424fc797575fb16f854b831d16fd47ceec078f2c"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e23412b5c41e58cec602f1135c57dfcf15482013ce6e5f093a86db69646a5aa"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:52a50aa3fb3acb9cf7213573ef55d31d6eca37f5709c69e6858fe3bc04a5c2a2"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:127cee571038f252a552760076407f9cff79761c3d436a12af6000cd182a9d04"}, + {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:8d12251f02d69d8310b046e82572ed486685c38f02176bd08baf216746eb947f"}, + {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:54f1852cd531aa981bc0965b7d609f5f6cc8ce8c41b1139f6ed6b3c54ab82bfb"}, + {file = "pillow-10.2.0-cp312-cp312-win32.whl", hash = "sha256:257d8788df5ca62c980314053197f4d46eefedf4e6175bc9412f14412ec4ea2f"}, + {file = "pillow-10.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:154e939c5f0053a383de4fd3d3da48d9427a7e985f58af8e94d0b3c9fcfcf4f9"}, + {file = "pillow-10.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:f379abd2f1e3dddb2b61bc67977a6b5a0a3f7485538bcc6f39ec76163891ee48"}, + {file = "pillow-10.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8373c6c251f7ef8bda6675dd6d2b3a0fcc31edf1201266b5cf608b62a37407f9"}, + {file = "pillow-10.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:870ea1ada0899fd0b79643990809323b389d4d1d46c192f97342eeb6ee0b8483"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4b6b1e20608493548b1f32bce8cca185bf0480983890403d3b8753e44077129"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3031709084b6e7852d00479fd1d310b07d0ba82765f973b543c8af5061cf990e"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:3ff074fc97dd4e80543a3e91f69d58889baf2002b6be64347ea8cf5533188213"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:cb4c38abeef13c61d6916f264d4845fab99d7b711be96c326b84df9e3e0ff62d"}, + {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b1b3020d90c2d8e1dae29cf3ce54f8094f7938460fb5ce8bc5c01450b01fbaf6"}, + {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:170aeb00224ab3dc54230c797f8404507240dd868cf52066f66a41b33169bdbe"}, + {file = "pillow-10.2.0-cp38-cp38-win32.whl", hash = "sha256:c4225f5220f46b2fde568c74fca27ae9771536c2e29d7c04f4fb62c83275ac4e"}, + {file = "pillow-10.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:0689b5a8c5288bc0504d9fcee48f61a6a586b9b98514d7d29b840143d6734f39"}, + {file = "pillow-10.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b792a349405fbc0163190fde0dc7b3fef3c9268292586cf5645598b48e63dc67"}, + {file = "pillow-10.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c570f24be1e468e3f0ce7ef56a89a60f0e05b30a3669a459e419c6eac2c35364"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8ecd059fdaf60c1963c58ceb8997b32e9dc1b911f5da5307aab614f1ce5c2fb"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c365fd1703040de1ec284b176d6af5abe21b427cb3a5ff68e0759e1e313a5e7e"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:70c61d4c475835a19b3a5aa42492409878bbca7438554a1f89d20d58a7c75c01"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b6f491cdf80ae540738859d9766783e3b3c8e5bd37f5dfa0b76abdecc5081f13"}, + {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d189550615b4948f45252d7f005e53c2040cea1af5b60d6f79491a6e147eef7"}, + {file = 
"pillow-10.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:49d9ba1ed0ef3e061088cd1e7538a0759aab559e2e0a80a36f9fd9d8c0c21591"}, + {file = "pillow-10.2.0-cp39-cp39-win32.whl", hash = "sha256:babf5acfede515f176833ed6028754cbcd0d206f7f614ea3447d67c33be12516"}, + {file = "pillow-10.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:0304004f8067386b477d20a518b50f3fa658a28d44e4116970abfcd94fac34a8"}, + {file = "pillow-10.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:0fb3e7fc88a14eacd303e90481ad983fd5b69c761e9e6ef94c983f91025da869"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:322209c642aabdd6207517e9739c704dc9f9db943015535783239022002f054a"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3eedd52442c0a5ff4f887fab0c1c0bb164d8635b32c894bc1faf4c618dd89df2"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb28c753fd5eb3dd859b4ee95de66cc62af91bcff5db5f2571d32a520baf1f04"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:33870dc4653c5017bf4c8873e5488d8f8d5f8935e2f1fb9a2208c47cdd66efd2"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3c31822339516fb3c82d03f30e22b1d038da87ef27b6a78c9549888f8ceda39a"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a2b56ba36e05f973d450582fb015594aaa78834fefe8dfb8fcd79b93e64ba4c6"}, + {file = "pillow-10.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d8e6aeb9201e655354b3ad049cb77d19813ad4ece0df1249d3c793de3774f8c7"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:2247178effb34a77c11c0e8ac355c7a741ceca0a732b27bf11e747bbc950722f"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15587643b9e5eb26c48e49a7b33659790d28f190fc514a322d55da2fb5c2950e"}, + {file = 
"pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753cd8f2086b2b80180d9b3010dd4ed147efc167c90d3bf593fe2af21265e5a5"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7c8f97e8e7a9009bcacbe3766a36175056c12f9a44e6e6f2d5caad06dcfbf03b"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d1b35bcd6c5543b9cb547dee3150c93008f8dd0f1fef78fc0cd2b141c5baf58a"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe4c15f6c9285dc54ce6553a3ce908ed37c8f3825b5a51a15c91442bb955b868"}, + {file = "pillow-10.2.0.tar.gz", hash = "sha256:e87f0b2c78157e12d7686b27d63c070fd65d994e8ddae6f328e0dcf4a0cd007e"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] + +[[package]] +name = "pinecone-client" +version = "2.2.4" +description = "Pinecone client and SDK" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pinecone-client-2.2.4.tar.gz", hash = "sha256:2c1cc1d6648b2be66e944db2ffa59166a37b9164d1135ad525d9cd8b1e298168"}, + {file = "pinecone_client-2.2.4-py3-none-any.whl", hash = "sha256:5bf496c01c2f82f4e5c2dc977cc5062ecd7168b8ed90743b09afcc8c7eb242ec"}, +] + +[package.dependencies] +dnspython = ">=2.0.0" +loguru = ">=0.5.0" +numpy = ">=1.22.0" +python-dateutil = ">=2.5.3" +pyyaml = ">=5.4" +requests = ">=2.19.0" +tqdm = ">=4.64.1" +typing-extensions = ">=3.7.4" +urllib3 = ">=1.21.1" + +[package.extras] +grpc = ["googleapis-common-protos (>=1.53.0)", "grpc-gateway-protoc-gen-openapiv2 (==0.1.0)", "grpcio (>=1.44.0)", "lz4 (>=3.1.3)", "protobuf (>=3.20.0,<3.21.0)"] + +[[package]] +name = "platformdirs" 
+version = "4.1.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"}, + {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"}, +] + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] + +[[package]] +name = "playsound" +version = "1.2.2" +description = "Pure Python, cross platform, single function module with no dependencies for playing sounds." +optional = false +python-versions = "*" +files = [ + {file = "playsound-1.2.2-py2.py3-none-any.whl", hash = "sha256:1e83750a5325cbccee03d6e751ba3e78c037ac95b95a3ba1f38d0c5aca9e1a34"}, +] + +[[package]] +name = "pluggy" +version = "1.3.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, + {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "posthog" +version = "3.3.1" +description = "Integrate PostHog into any python application." 
+optional = false +python-versions = "*" +files = [ + {file = "posthog-3.3.1-py2.py3-none-any.whl", hash = "sha256:5f53b232acb680a0389e372db5f786061a18386b8b5324bddcc64eff9fdb319b"}, + {file = "posthog-3.3.1.tar.gz", hash = "sha256:252cb6ab5cbe7ff002753f34fb647721b3af75034b4a5a631317ebf3db58fe59"}, +] + +[package.dependencies] +backoff = ">=1.10.0" +monotonic = ">=1.5" +python-dateutil = ">2.1" +requests = ">=2.7,<3.0" +six = ">=1.5" + +[package.extras] +dev = ["black", "flake8", "flake8-print", "isort", "pre-commit"] +sentry = ["django", "sentry-sdk"] +test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint", "pytest", "pytest-timeout"] + +[[package]] +name = "pre-commit" +version = "3.6.0" +description = "A framework for managing and maintaining multi-language pre-commit hooks." +optional = false +python-versions = ">=3.9" +files = [ + {file = "pre_commit-3.6.0-py2.py3-none-any.whl", hash = "sha256:c255039ef399049a5544b6ce13d135caba8f2c28c3b4033277a788f434308376"}, + {file = "pre_commit-3.6.0.tar.gz", hash = "sha256:d30bad9abf165f7785c15a21a1f46da7d0677cb00ee7ff4c579fd38922efe15d"}, +] + +[package.dependencies] +cfgv = ">=2.0.0" +identify = ">=1.0.0" +nodeenv = ">=0.11.1" +pyyaml = ">=5.1" +virtualenv = ">=20.10.0" + +[[package]] +name = "preshed" +version = "3.0.9" +description = "Cython hash table that trusts the keys are pre-hashed" +optional = false +python-versions = ">=3.6" +files = [ + {file = "preshed-3.0.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4f96ef4caf9847b2bb9868574dcbe2496f974e41c2b83d6621c24fb4c3fc57e3"}, + {file = "preshed-3.0.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a61302cf8bd30568631adcdaf9e6b21d40491bd89ba8ebf67324f98b6c2a2c05"}, + {file = "preshed-3.0.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99499e8a58f58949d3f591295a97bca4e197066049c96f5d34944dd21a497193"}, + {file = 
"preshed-3.0.9-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea6b6566997dc3acd8c6ee11a89539ac85c77275b4dcefb2dc746d11053a5af8"}, + {file = "preshed-3.0.9-cp310-cp310-win_amd64.whl", hash = "sha256:bfd523085a84b1338ff18f61538e1cfcdedc4b9e76002589a301c364d19a2e36"}, + {file = "preshed-3.0.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e7c2364da27f2875524ce1ca754dc071515a9ad26eb5def4c7e69129a13c9a59"}, + {file = "preshed-3.0.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:182138033c0730c683a6d97e567ceb8a3e83f3bff5704f300d582238dbd384b3"}, + {file = "preshed-3.0.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:345a10be3b86bcc6c0591d343a6dc2bfd86aa6838c30ced4256dfcfa836c3a64"}, + {file = "preshed-3.0.9-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51d0192274aa061699b284f9fd08416065348edbafd64840c3889617ee1609de"}, + {file = "preshed-3.0.9-cp311-cp311-win_amd64.whl", hash = "sha256:96b857d7a62cbccc3845ac8c41fd23addf052821be4eb987f2eb0da3d8745aa1"}, + {file = "preshed-3.0.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b4fe6720012c62e6d550d6a5c1c7ad88cacef8388d186dad4bafea4140d9d198"}, + {file = "preshed-3.0.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e04f05758875be9751e483bd3c519c22b00d3b07f5a64441ec328bb9e3c03700"}, + {file = "preshed-3.0.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a55091d0e395f1fdb62ab43401bb9f8b46c7d7794d5b071813c29dc1ab22fd0"}, + {file = "preshed-3.0.9-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7de8f5138bcac7870424e09684dc3dd33c8e30e81b269f6c9ede3d8c7bb8e257"}, + {file = "preshed-3.0.9-cp312-cp312-win_amd64.whl", hash = "sha256:24229c77364628743bc29c5620c5d6607ed104f0e02ae31f8a030f99a78a5ceb"}, + {file = 
"preshed-3.0.9-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b73b0f7ecc58095ebbc6ca26ec806008ef780190fe685ce471b550e7eef58dc2"}, + {file = "preshed-3.0.9-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cb90ecd5bec71c21d95962db1a7922364d6db2abe284a8c4b196df8bbcc871e"}, + {file = "preshed-3.0.9-cp36-cp36m-win_amd64.whl", hash = "sha256:e304a0a8c9d625b70ba850c59d4e67082a6be9c16c4517b97850a17a282ebee6"}, + {file = "preshed-3.0.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1fa6d3d5529b08296ff9b7b4da1485c080311fd8744bbf3a86019ff88007b382"}, + {file = "preshed-3.0.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef1e5173809d85edd420fc79563b286b88b4049746b797845ba672cf9435c0e7"}, + {file = "preshed-3.0.9-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fe81eb21c7d99e8b9a802cc313b998c5f791bda592903c732b607f78a6b7dc4"}, + {file = "preshed-3.0.9-cp37-cp37m-win_amd64.whl", hash = "sha256:78590a4a952747c3766e605ce8b747741005bdb1a5aa691a18aae67b09ece0e6"}, + {file = "preshed-3.0.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3452b64d97ce630e200c415073040aa494ceec6b7038f7a2a3400cbd7858e952"}, + {file = "preshed-3.0.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ac970d97b905e9e817ec13d31befd5b07c9cfec046de73b551d11a6375834b79"}, + {file = "preshed-3.0.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eebaa96ece6641cd981491cba995b68c249e0b6877c84af74971eacf8990aa19"}, + {file = "preshed-3.0.9-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d473c5f6856e07a88d41fe00bb6c206ecf7b34c381d30de0b818ba2ebaf9406"}, + {file = "preshed-3.0.9-cp38-cp38-win_amd64.whl", hash = "sha256:0de63a560f10107a3f0a9e252cc3183b8fdedcb5f81a86938fd9f1dcf8a64adf"}, + {file = 
"preshed-3.0.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3a9ad9f738084e048a7c94c90f40f727217387115b2c9a95c77f0ce943879fcd"}, + {file = "preshed-3.0.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a671dfa30b67baa09391faf90408b69c8a9a7f81cb9d83d16c39a182355fbfce"}, + {file = "preshed-3.0.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23906d114fc97c17c5f8433342495d7562e96ecfd871289c2bb2ed9a9df57c3f"}, + {file = "preshed-3.0.9-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:778cf71f82cedd2719b256f3980d556d6fb56ec552334ba79b49d16e26e854a0"}, + {file = "preshed-3.0.9-cp39-cp39-win_amd64.whl", hash = "sha256:a6e579439b329eb93f32219ff27cb358b55fbb52a4862c31a915a098c8a22ac2"}, + {file = "preshed-3.0.9.tar.gz", hash = "sha256:721863c5244ffcd2651ad0928951a2c7c77b102f4e11a251ad85d37ee7621660"}, +] + +[package.dependencies] +cymem = ">=2.0.2,<2.1.0" +murmurhash = ">=0.28.0,<1.1.0" + +[[package]] +name = "priority" +version = "2.0.0" +description = "A pure-Python implementation of the HTTP/2 priority tree" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "priority-2.0.0-py3-none-any.whl", hash = "sha256:6f8eefce5f3ad59baf2c080a664037bb4725cd0a790d53d59ab4059288faf6aa"}, + {file = "priority-2.0.0.tar.gz", hash = "sha256:c965d54f1b8d0d0b19479db3924c7c36cf672dbf2aec92d43fbdaf4492ba18c0"}, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.43" +description = "Library for building powerful interactive command lines in Python" +optional = true +python-versions = ">=3.7.0" +files = [ + {file = "prompt_toolkit-3.0.43-py3-none-any.whl", hash = "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6"}, + {file = "prompt_toolkit-3.0.43.tar.gz", hash = "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d"}, +] + +[package.dependencies] +wcwidth = "*" + +[[package]] +name = "proto-plus" +version = "1.23.0" +description = 
"Beautiful, Pythonic protocol buffers." +optional = false +python-versions = ">=3.6" +files = [ + {file = "proto-plus-1.23.0.tar.gz", hash = "sha256:89075171ef11988b3fa157f5dbd8b9cf09d65fffee97e29ce403cd8defba19d2"}, + {file = "proto_plus-1.23.0-py3-none-any.whl", hash = "sha256:a829c79e619e1cf632de091013a4173deed13a55f326ef84f05af6f50ff4c82c"}, +] + +[package.dependencies] +protobuf = ">=3.19.0,<5.0.0dev" + +[package.extras] +testing = ["google-api-core[grpc] (>=1.31.5)"] + +[[package]] +name = "protobuf" +version = "4.25.2" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "protobuf-4.25.2-cp310-abi3-win32.whl", hash = "sha256:b50c949608682b12efb0b2717f53256f03636af5f60ac0c1d900df6213910fd6"}, + {file = "protobuf-4.25.2-cp310-abi3-win_amd64.whl", hash = "sha256:8f62574857ee1de9f770baf04dde4165e30b15ad97ba03ceac65f760ff018ac9"}, + {file = "protobuf-4.25.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:2db9f8fa64fbdcdc93767d3cf81e0f2aef176284071507e3ede160811502fd3d"}, + {file = "protobuf-4.25.2-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:10894a2885b7175d3984f2be8d9850712c57d5e7587a2410720af8be56cdaf62"}, + {file = "protobuf-4.25.2-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fc381d1dd0516343f1440019cedf08a7405f791cd49eef4ae1ea06520bc1c020"}, + {file = "protobuf-4.25.2-cp38-cp38-win32.whl", hash = "sha256:33a1aeef4b1927431d1be780e87b641e322b88d654203a9e9d93f218ee359e61"}, + {file = "protobuf-4.25.2-cp38-cp38-win_amd64.whl", hash = "sha256:47f3de503fe7c1245f6f03bea7e8d3ec11c6c4a2ea9ef910e3221c8a15516d62"}, + {file = "protobuf-4.25.2-cp39-cp39-win32.whl", hash = "sha256:5e5c933b4c30a988b52e0b7c02641760a5ba046edc5e43d3b94a74c9fc57c1b3"}, + {file = "protobuf-4.25.2-cp39-cp39-win_amd64.whl", hash = "sha256:d66a769b8d687df9024f2985d5137a337f957a0916cf5464d1513eee96a63ff0"}, + {file = "protobuf-4.25.2-py3-none-any.whl", hash = "sha256:a8b7a98d4ce823303145bf3c1a8bdb0f2f4642a414b196f04ad9853ed0c8f830"}, + {file = 
"protobuf-4.25.2.tar.gz", hash = "sha256:fe599e175cb347efc8ee524bcd4b902d11f7262c0e569ececcb89995c15f0a5e"}, +] + +[[package]] +name = "psutil" +version = "5.9.7" +description = "Cross-platform lib for process and system monitoring in Python." +optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "psutil-5.9.7-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:0bd41bf2d1463dfa535942b2a8f0e958acf6607ac0be52265ab31f7923bcd5e6"}, + {file = "psutil-5.9.7-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:5794944462509e49d4d458f4dbfb92c47539e7d8d15c796f141f474010084056"}, + {file = "psutil-5.9.7-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:fe361f743cb3389b8efda21980d93eb55c1f1e3898269bc9a2a1d0bb7b1f6508"}, + {file = "psutil-5.9.7-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:e469990e28f1ad738f65a42dcfc17adaed9d0f325d55047593cb9033a0ab63df"}, + {file = "psutil-5.9.7-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:3c4747a3e2ead1589e647e64aad601981f01b68f9398ddf94d01e3dc0d1e57c7"}, + {file = "psutil-5.9.7-cp27-none-win32.whl", hash = "sha256:1d4bc4a0148fdd7fd8f38e0498639ae128e64538faa507df25a20f8f7fb2341c"}, + {file = "psutil-5.9.7-cp27-none-win_amd64.whl", hash = "sha256:4c03362e280d06bbbfcd52f29acd79c733e0af33d707c54255d21029b8b32ba6"}, + {file = "psutil-5.9.7-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ea36cc62e69a13ec52b2f625c27527f6e4479bca2b340b7a452af55b34fcbe2e"}, + {file = "psutil-5.9.7-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1132704b876e58d277168cd729d64750633d5ff0183acf5b3c986b8466cd0284"}, + {file = "psutil-5.9.7-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe8b7f07948f1304497ce4f4684881250cd859b16d06a1dc4d7941eeb6233bfe"}, + {file = "psutil-5.9.7-cp36-cp36m-win32.whl", hash = 
"sha256:b27f8fdb190c8c03914f908a4555159327d7481dac2f01008d483137ef3311a9"}, + {file = "psutil-5.9.7-cp36-cp36m-win_amd64.whl", hash = "sha256:44969859757f4d8f2a9bd5b76eba8c3099a2c8cf3992ff62144061e39ba8568e"}, + {file = "psutil-5.9.7-cp37-abi3-win32.whl", hash = "sha256:c727ca5a9b2dd5193b8644b9f0c883d54f1248310023b5ad3e92036c5e2ada68"}, + {file = "psutil-5.9.7-cp37-abi3-win_amd64.whl", hash = "sha256:f37f87e4d73b79e6c5e749440c3113b81d1ee7d26f21c19c47371ddea834f414"}, + {file = "psutil-5.9.7-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:032f4f2c909818c86cea4fe2cc407f1c0f0cde8e6c6d702b28b8ce0c0d143340"}, + {file = "psutil-5.9.7.tar.gz", hash = "sha256:3f02134e82cfb5d089fddf20bb2e03fd5cd52395321d1c8458a9e58500ff417c"}, +] + +[package.extras] +test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] + +[[package]] +name = "psycopg2-binary" +version = "2.9.9" +description = "psycopg2 - Python-PostgreSQL Database Adapter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "psycopg2-binary-2.9.9.tar.gz", hash = "sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c2470da5418b76232f02a2fcd2229537bb2d5a7096674ce61859c3229f2eb202"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6af2a6d4b7ee9615cbb162b0738f6e1fd1f5c3eda7e5da17861eacf4c717ea7"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75723c3c0fbbf34350b46a3199eb50638ab22a0228f93fb472ef4d9becc2382b"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83791a65b51ad6ee6cf0845634859d69a038ea9b03d7b26e703f94c7e93dbcf9"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ef4854e82c09e84cc63084a9e4ccd6d9b154f1dbdd283efb92ecd0b5e2b8c84"}, + {file = 
"psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed1184ab8f113e8d660ce49a56390ca181f2981066acc27cf637d5c1e10ce46e"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d2997c458c690ec2bc6b0b7ecbafd02b029b7b4283078d3b32a852a7ce3ddd98"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b58b4710c7f4161b5e9dcbe73bb7c62d65670a87df7bcce9e1faaad43e715245"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:0c009475ee389757e6e34611d75f6e4f05f0cf5ebb76c6037508318e1a1e0d7e"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8dbf6d1bc73f1d04ec1734bae3b4fb0ee3cb2a493d35ede9badbeb901fb40f6f"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-win32.whl", hash = "sha256:3f78fd71c4f43a13d342be74ebbc0666fe1f555b8837eb113cb7416856c79682"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:876801744b0dee379e4e3c38b76fc89f88834bb15bf92ee07d94acd06ec890a0"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ee825e70b1a209475622f7f7b776785bd68f34af6e7a46e2e42f27b659b5bc26"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1ea665f8ce695bcc37a90ee52de7a7980be5161375d42a0b6c6abedbf0d81f0f"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:143072318f793f53819048fdfe30c321890af0c3ec7cb1dfc9cc87aa88241de2"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c332c8d69fb64979ebf76613c66b985414927a40f8defa16cf1bc028b7b0a7b0"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7fc5a5acafb7d6ccca13bfa8c90f8c51f13d8fb87d95656d3950f0158d3ce53"}, + {file = 
"psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977646e05232579d2e7b9c59e21dbe5261f403a88417f6a6512e70d3f8a046be"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b6356793b84728d9d50ead16ab43c187673831e9d4019013f1402c41b1db9b27"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bc7bb56d04601d443f24094e9e31ae6deec9ccb23581f75343feebaf30423359"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:77853062a2c45be16fd6b8d6de2a99278ee1d985a7bd8b103e97e41c034006d2"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:78151aa3ec21dccd5cdef6c74c3e73386dcdfaf19bced944169697d7ac7482fc"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"}, + {file = 
"psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e6f98446430fdf41bd36d4faa6cb409f5140c1c2cf58ce0bbdaf16af7d3f119"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c77e3d1862452565875eb31bdb45ac62502feabbd53429fdc39a1cc341d681ba"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8359bf4791968c5a78c56103702000105501adb557f3cf772b2c207284273984"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:275ff571376626195ab95a746e6a04c7df8ea34638b99fc11160de91f2fef503"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_aarch64.whl", 
hash = "sha256:f9b5571d33660d5009a8b3c25dc1db560206e2d2f89d3df1cb32d72c0d117d52"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:420f9bbf47a02616e8554e825208cb947969451978dceb77f95ad09c37791dae"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:4154ad09dac630a0f13f37b583eae260c6aa885d67dfbccb5b02c33f31a6d420"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a148c5d507bb9b4f2030a2025c545fccb0e1ef317393eaba42e7eabd28eb6041"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:68fc1f1ba168724771e38bee37d940d2865cb0f562380a1fb1ffb428b75cb692"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:281309265596e388ef483250db3640e5f414168c5a67e9c665cafce9492eda2f"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:60989127da422b74a04345096c10d416c2b41bd7bf2a380eb541059e4e999980"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:246b123cc54bb5361588acc54218c8c9fb73068bf227a4a531d8ed56fa3ca7d6"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34eccd14566f8fe14b2b95bb13b11572f7c7d5c36da61caf414d23b91fcc5d94"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18d0ef97766055fec15b5de2c06dd8e7654705ce3e5e5eed3b6651a1d2a9a152"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d3f82c171b4ccd83bbaf35aa05e44e690113bd4f3b7b6cc54d2219b132f3ae55"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ead20f7913a9c1e894aebe47cccf9dc834e1618b7aa96155d2091a626e59c972"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ca49a8119c6cbd77375ae303b0cfd8c11f011abbbd64601167ecca18a87e7cdd"}, + {file = 
"psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:323ba25b92454adb36fa425dc5cf6f8f19f78948cbad2e7bc6cdf7b0d7982e59"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:1236ed0952fbd919c100bc839eaa4a39ebc397ed1c08a97fc45fee2a595aa1b3"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:729177eaf0aefca0994ce4cffe96ad3c75e377c7b6f4efa59ebf003b6d398716"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-win32.whl", hash = "sha256:804d99b24ad523a1fe18cc707bf741670332f7c7412e9d49cb5eab67e886b9b5"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:a6cdcc3ede532f4a4b96000b6362099591ab4a3e913d70bcbac2b56c872446f7"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:72dffbd8b4194858d0941062a9766f8297e8868e1dd07a7b36212aaa90f49472"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:30dcc86377618a4c8f3b72418df92e77be4254d8f89f14b8e8f57d6d43603c0f"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31a34c508c003a4347d389a9e6fcc2307cc2150eb516462a7a17512130de109e"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15208be1c50b99203fe88d15695f22a5bed95ab3f84354c494bcb1d08557df67"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1873aade94b74715be2246321c8650cabf5a0d098a95bab81145ffffa4c13876"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a58c98a7e9c021f357348867f537017057c2ed7f77337fd914d0bedb35dace7"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4686818798f9194d03c9129a4d9a702d9e113a89cb03bffe08c6cf799e053291"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:ebdc36bea43063116f0486869652cb2ed7032dbc59fbcb4445c4862b5c1ecf7f"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ca08decd2697fdea0aea364b370b1249d47336aec935f87b8bbfd7da5b2ee9c1"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ac05fb791acf5e1a3e39402641827780fe44d27e72567a000412c648a85ba860"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-win32.whl", hash = "sha256:9dba73be7305b399924709b91682299794887cbbd88e38226ed9f6712eabee90"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:f7ae5d65ccfbebdfa761585228eb4d0df3a8b15cfb53bd953e713e09fbb12957"}, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +description = "Run a subprocess in a pseudo terminal" +optional = true +python-versions = "*" +files = [ + {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, + {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, +] + +[[package]] +name = "pulsar-client" +version = "3.4.0" +description = "Apache Pulsar Python client library" +optional = false +python-versions = "*" +files = [ + {file = "pulsar_client-3.4.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:ebf99db5244ff69479283b25621b070492acc4bb643d162d86b90387cb6fdb2a"}, + {file = "pulsar_client-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6cb5d8e1482a8aea758633be23717e0c4bb7dc53784e37915c0048c0382f134"}, + {file = "pulsar_client-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b30a7592e42c76034e9a8d64d42dd5bab361425f869de562e9ccad698e19cd88"}, + {file = "pulsar_client-3.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5963090a78a5644ba25f41da3a6d49ea3f00c972b095baff365916dc246426a"}, + {file = "pulsar_client-3.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:419cdcf577f755e3f31bf264300d9ba158325edb2ee9cee555d81ba1909c094e"}, + {file = "pulsar_client-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:4c93c35ee97307dae153e748b33dcd3d4f06da34bca373321aa2df73f1535705"}, + {file = "pulsar_client-3.4.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:11952fb022ee72debf53b169f4482f9dc5c890be0149ae98779864b3a21f1bd3"}, + {file = "pulsar_client-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8743c320aa96798d20cafa98ea97a68c4295fc4872c23acd5e012fd36cb06ba"}, + {file = "pulsar_client-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33571de99cd898349f17978ba62e2b839ea0275fb7067f31bf5f6ebfeae0987d"}, + {file = "pulsar_client-3.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a60c03c3e70f018538e7cd3fa84d95e283b610272b744166dbc48960a809fa07"}, + {file = "pulsar_client-3.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4c47041267b5843ffec54352d842156c279945f3e976d7025ffa89875ff76390"}, + {file = "pulsar_client-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:49fe4ab04004b476c87ab3ad22fe87346fca564a3e3ca9c0ac58fee45a895d81"}, + {file = "pulsar_client-3.4.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:1e077a4839be3ead3de3f05b4c244269dca2df07f47cea0b90544c7e9dc1642f"}, + {file = "pulsar_client-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f202b84e1f683d64672dd1971114600ae2e5c3735587286ff9bfb431385f08e8"}, + {file = "pulsar_client-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c606c04f357341042fa6c75477de7d2204f7ae50aa29c2f74b24e54c85f47f96"}, + {file = "pulsar_client-3.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c67b25ede3a578f5a7dc30230e52609ef38191f74b47e5cbdbc98c42df556927"}, + {file = "pulsar_client-3.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b7f8211cc9460cdf4d06e4e1cb878689d2aa4a7e4027bd2a2f1419a79ade16a6"}, + {file = 
"pulsar_client-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:c5399e9780d6951c69808c0b6175311a966af82fb08addf6e741ae37b1bee7ef"}, + {file = "pulsar_client-3.4.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:a2d6c850b60106dc915d3476a490fba547c6748a5f742b68abd30d1a35355b82"}, + {file = "pulsar_client-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a52ea8294a9f30eb6f0a2db5dc16e3aad7ff2284f818c48ad3a6b601723be02b"}, + {file = "pulsar_client-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1eeeede40108be12222e009285c971e5b8f6433d9f0f8ef934d6a131585921c4"}, + {file = "pulsar_client-3.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9409066c600f2b6f220552c5dfe08aeeabcf07fe0e76367aa5816b2e87a5cf72"}, + {file = "pulsar_client-3.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:58e2f886e6dab43e66c3ce990fe96209e55ab46350506829a637b77b74125fb9"}, + {file = "pulsar_client-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:b57dfa5063b0d9dc7664896c55605eac90753e35e80db5a959d3be2be0ab0d48"}, + {file = "pulsar_client-3.4.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:7704c664aa2c801af4c2d3a58e9d8ffaeef12ce8a0f71712e9187f9a96da856f"}, + {file = "pulsar_client-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0364db563e27442053bdbb8655e7ffb420f491690bc2c78da5a58bd35c658ad"}, + {file = "pulsar_client-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3e34de19e0744d8aa3538cb2172076bccd0761b3e94ebadb7bd59765ae3d1ed"}, + {file = "pulsar_client-3.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:dc8be41dec8cb052fb1837550f495e9b73a8b3cf85e07157904ec84832758a65"}, + {file = "pulsar_client-3.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b49d669bed15b7edb9c936704310d57808f1d01c511b94d866f54fe8ffe1752d"}, + {file = "pulsar_client-3.4.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:88c93e5fbfc349f3967e931f7a908d15fd4fd725ebdd842423ac9cd961fe293f"}, +] + +[package.dependencies] +certifi = "*" + +[package.extras] +all = ["apache-bookkeeper-client (>=4.16.1)", "fastavro (>=1.9.2)", "grpcio (>=1.60.0)", "prometheus-client", "protobuf (>=3.6.1,<=3.20.3)", "ratelimit"] +avro = ["fastavro (>=1.9.2)"] +functions = ["apache-bookkeeper-client (>=4.16.1)", "grpcio (>=1.60.0)", "prometheus-client", "protobuf (>=3.6.1,<=3.20.3)", "ratelimit"] + +[[package]] +name = "pure-eval" +version = "0.2.2" +description = "Safely evaluate AST nodes without side effects" +optional = true +python-versions = "*" +files = [ + {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, + {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"}, +] + +[package.extras] +tests = ["pytest"] + +[[package]] +name = "py-cpuinfo" +version = "9.0.0" +description = "Get CPU info with pure Python" +optional = false +python-versions = "*" +files = [ + {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"}, + {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"}, +] + +[[package]] +name = "pyasn1" +version = "0.5.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "pyasn1-0.5.1-py2.py3-none-any.whl", hash = "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58"}, + {file = "pyasn1-0.5.1.tar.gz", hash = "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.3.0" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = 
"!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "pyasn1_modules-0.3.0-py2.py3-none-any.whl", hash = "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"}, + {file = "pyasn1_modules-0.3.0.tar.gz", hash = "sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.6.0" + +[[package]] +name = "pycodestyle" +version = "2.11.1" +description = "Python style guide checker" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"}, + {file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"}, +] + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pydantic" +version = "1.10.13" +description = "Data validation and settings management using python type hints" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic-1.10.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:efff03cc7a4f29d9009d1c96ceb1e7a70a65cfe86e89d34e4a5f2ab1e5693737"}, + {file = "pydantic-1.10.13-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ecea2b9d80e5333303eeb77e180b90e95eea8f765d08c3d278cd56b00345d01"}, + {file = "pydantic-1.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1740068fd8e2ef6eb27a20e5651df000978edce6da6803c2bef0bc74540f9548"}, + {file = 
"pydantic-1.10.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84bafe2e60b5e78bc64a2941b4c071a4b7404c5c907f5f5a99b0139781e69ed8"}, + {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bc0898c12f8e9c97f6cd44c0ed70d55749eaf783716896960b4ecce2edfd2d69"}, + {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:654db58ae399fe6434e55325a2c3e959836bd17a6f6a0b6ca8107ea0571d2e17"}, + {file = "pydantic-1.10.13-cp310-cp310-win_amd64.whl", hash = "sha256:75ac15385a3534d887a99c713aa3da88a30fbd6204a5cd0dc4dab3d770b9bd2f"}, + {file = "pydantic-1.10.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c553f6a156deb868ba38a23cf0df886c63492e9257f60a79c0fd8e7173537653"}, + {file = "pydantic-1.10.13-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5e08865bc6464df8c7d61439ef4439829e3ab62ab1669cddea8dd00cd74b9ffe"}, + {file = "pydantic-1.10.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e31647d85a2013d926ce60b84f9dd5300d44535a9941fe825dc349ae1f760df9"}, + {file = "pydantic-1.10.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:210ce042e8f6f7c01168b2d84d4c9eb2b009fe7bf572c2266e235edf14bacd80"}, + {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8ae5dd6b721459bfa30805f4c25880e0dd78fc5b5879f9f7a692196ddcb5a580"}, + {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f8e81fc5fb17dae698f52bdd1c4f18b6ca674d7068242b2aff075f588301bbb0"}, + {file = "pydantic-1.10.13-cp311-cp311-win_amd64.whl", hash = "sha256:61d9dce220447fb74f45e73d7ff3b530e25db30192ad8d425166d43c5deb6df0"}, + {file = "pydantic-1.10.13-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4b03e42ec20286f052490423682016fd80fda830d8e4119f8ab13ec7464c0132"}, + {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:f59ef915cac80275245824e9d771ee939133be38215555e9dc90c6cb148aaeb5"}, + {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a1f9f747851338933942db7af7b6ee8268568ef2ed86c4185c6ef4402e80ba8"}, + {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:97cce3ae7341f7620a0ba5ef6cf043975cd9d2b81f3aa5f4ea37928269bc1b87"}, + {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854223752ba81e3abf663d685f105c64150873cc6f5d0c01d3e3220bcff7d36f"}, + {file = "pydantic-1.10.13-cp37-cp37m-win_amd64.whl", hash = "sha256:b97c1fac8c49be29486df85968682b0afa77e1b809aff74b83081cc115e52f33"}, + {file = "pydantic-1.10.13-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c958d053453a1c4b1c2062b05cd42d9d5c8eb67537b8d5a7e3c3032943ecd261"}, + {file = "pydantic-1.10.13-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c5370a7edaac06daee3af1c8b1192e305bc102abcbf2a92374b5bc793818599"}, + {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6f6e7305244bddb4414ba7094ce910560c907bdfa3501e9db1a7fd7eaea127"}, + {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3a3c792a58e1622667a2837512099eac62490cdfd63bd407993aaf200a4cf1f"}, + {file = "pydantic-1.10.13-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c636925f38b8db208e09d344c7aa4f29a86bb9947495dd6b6d376ad10334fb78"}, + {file = "pydantic-1.10.13-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:678bcf5591b63cc917100dc50ab6caebe597ac67e8c9ccb75e698f66038ea953"}, + {file = "pydantic-1.10.13-cp38-cp38-win_amd64.whl", hash = "sha256:6cf25c1a65c27923a17b3da28a0bdb99f62ee04230c931d83e888012851f4e7f"}, + {file = "pydantic-1.10.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8ef467901d7a41fa0ca6db9ae3ec0021e3f657ce2c208e98cd511f3161c762c6"}, + {file = 
"pydantic-1.10.13-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:968ac42970f57b8344ee08837b62f6ee6f53c33f603547a55571c954a4225691"}, + {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9849f031cf8a2f0a928fe885e5a04b08006d6d41876b8bbd2fc68a18f9f2e3fd"}, + {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56e3ff861c3b9c6857579de282ce8baabf443f42ffba355bf070770ed63e11e1"}, + {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f00790179497767aae6bcdc36355792c79e7bbb20b145ff449700eb076c5f96"}, + {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:75b297827b59bc229cac1a23a2f7a4ac0031068e5be0ce385be1462e7e17a35d"}, + {file = "pydantic-1.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:e70ca129d2053fb8b728ee7d1af8e553a928d7e301a311094b8a0501adc8763d"}, + {file = "pydantic-1.10.13-py3-none-any.whl", hash = "sha256:b87326822e71bd5f313e7d3bfdc77ac3247035ac10b0c0618bd99dcf95b1e687"}, + {file = "pydantic-1.10.13.tar.gz", hash = "sha256:32c8b48dcd3b2ac4e78b0ba4af3a2c2eb6048cb75202f0ea7b34feb740efc340"}, +] + +[package.dependencies] +typing-extensions = ">=4.2.0" + +[package.extras] +dotenv = ["python-dotenv (>=0.10.4)"] +email = ["email-validator (>=1.0.3)"] + +[[package]] +name = "pyflakes" +version = "3.2.0" +description = "passive checker of Python programs" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyflakes-3.2.0-py2.py3-none-any.whl", hash = "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"}, + {file = "pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"}, +] + +[[package]] +name = "pygments" +version = "2.17.2" +description = "Pygments is a syntax highlighting package written in Python." 
+optional = true +python-versions = ">=3.7" +files = [ + {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, + {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, +] + +[package.extras] +plugins = ["importlib-metadata"] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pylatexenc" +version = "2.10" +description = "Simple LaTeX parser providing latex-to-unicode and unicode-to-latex conversion" +optional = false +python-versions = "*" +files = [ + {file = "pylatexenc-2.10.tar.gz", hash = "sha256:3dd8fd84eb46dc30bee1e23eaab8d8fb5a7f507347b23e5f38ad9675c84f40d3"}, +] + +[[package]] +name = "pyparsing" +version = "3.1.1" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.6.8" +files = [ + {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, + {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "pypdf" +version = "3.17.4" +description = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pypdf-3.17.4-py3-none-any.whl", hash = "sha256:6aa0f61b33779b64486de3f42835d3668badd48dac4a536aeb87da187a5eacd2"}, + {file = "pypdf-3.17.4.tar.gz", hash = "sha256:ec96e2e4fc9648ac609d19c00d41e9d606e0ae2ce5a0bbe7691426f5f157166a"}, +] + +[package.extras] +crypto = ["PyCryptodome", "cryptography"] +dev = ["black", "flit", "pip-tools", "pre-commit (<2.18.0)", "pytest-cov", "pytest-socket", "pytest-timeout", "pytest-xdist", "wheel"] +docs = ["myst_parser", "sphinx", "sphinx_rtd_theme"] +full = ["Pillow (>=8.0.0)", 
"PyCryptodome", "cryptography"] +image = ["Pillow (>=8.0.0)"] + +[[package]] +name = "pypika" +version = "0.48.9" +description = "A SQL query builder API for Python" +optional = false +python-versions = "*" +files = [ + {file = "PyPika-0.48.9.tar.gz", hash = "sha256:838836a61747e7c8380cd1b7ff638694b7a7335345d0f559b04b2cd832ad5378"}, +] + +[[package]] +name = "pyproject-hooks" +version = "1.0.0" +description = "Wrappers to call pyproject.toml-based build backend hooks." +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyproject_hooks-1.0.0-py3-none-any.whl", hash = "sha256:283c11acd6b928d2f6a7c73fa0d01cb2bdc5f07c57a2eeb6e83d5e56b97976f8"}, + {file = "pyproject_hooks-1.0.0.tar.gz", hash = "sha256:f271b298b97f5955d53fb12b72c1fb1948c22c1a6b70b315c54cedaca0264ef5"}, +] + +[package.dependencies] +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "pyreadline3" +version = "3.4.1" +description = "A python implementation of GNU readline." +optional = false +python-versions = "*" +files = [ + {file = "pyreadline3-3.4.1-py3-none-any.whl", hash = "sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb"}, + {file = "pyreadline3-3.4.1.tar.gz", hash = "sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae"}, +] + +[[package]] +name = "pysocks" +version = "1.7.1" +description = "A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "PySocks-1.7.1-py27-none-any.whl", hash = "sha256:08e69f092cc6dbe92a0fdd16eeb9b9ffbc13cadfe5ca4c7bd92ffb078b293299"}, + {file = "PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5"}, + {file = "PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0"}, +] + +[[package]] +name = "pytest" +version = "7.4.4" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, + {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.21.1" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-asyncio-0.21.1.tar.gz", hash = "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d"}, + {file = "pytest_asyncio-0.21.1-py3-none-any.whl", hash = "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"}, +] + +[package.dependencies] +pytest = ">=7.0.0" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio 
(>=0.7.0)"] + +[[package]] +name = "pytest-benchmark" +version = "4.0.0" +description = "A ``pytest`` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer." +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-benchmark-4.0.0.tar.gz", hash = "sha256:fb0785b83efe599a6a956361c0691ae1dbb5318018561af10f3e915caa0048d1"}, + {file = "pytest_benchmark-4.0.0-py3-none-any.whl", hash = "sha256:fdb7db64e31c8b277dff9850d2a2556d8b60bcb0ea6524e36e28ffd7c87f71d6"}, +] + +[package.dependencies] +py-cpuinfo = "*" +pytest = ">=3.8" + +[package.extras] +aspect = ["aspectlib"] +elasticsearch = ["elasticsearch"] +histogram = ["pygal", "pygaljs"] + +[[package]] +name = "pytest-cov" +version = "4.1.0" +description = "Pytest plugin for measuring coverage." +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, + {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, +] + +[package.dependencies] +coverage = {version = ">=5.2.1", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] + +[[package]] +name = "pytest-integration" +version = "0.2.3" +description = "Organizing pytests by integration or not" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pytest_integration-0.2.3-py3-none-any.whl", hash = "sha256:7f59ed1fa1cc8cb240f9495b68bc02c0421cce48589f78e49b7b842231604b12"}, + {file = "pytest_integration-0.2.3.tar.gz", hash = "sha256:b00988a5de8a6826af82d4c7a3485b43fbf32c11235e9f4a8b7225eef5fbcf65"}, +] + +[[package]] +name = "pytest-mock" +version = "3.12.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"pytest-mock-3.12.0.tar.gz", hash = "sha256:31a40f038c22cad32287bb43932054451ff5583ff094bca6f675df2f8bc1a6e9"}, + {file = "pytest_mock-3.12.0-py3-none-any.whl", hash = "sha256:0972719a7263072da3a21c7f4773069bcc7486027d7e8e1f81d98a47e701bc4f"}, +] + +[package.dependencies] +pytest = ">=5.0" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "pytest-recording" +version = "0.13.1" +description = "A pytest plugin that allows you recording of network interactions via VCR.py" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest_recording-0.13.1-py3-none-any.whl", hash = "sha256:e5c75feb2593eb4ed9362182c6640bfe19004204bf9a6082d62c91b5fdb50a3e"}, + {file = "pytest_recording-0.13.1.tar.gz", hash = "sha256:1265d679f39263f115968ec01c2a3bfed250170fd1b0d9e288970b2e4a13737a"}, +] + +[package.dependencies] +pytest = ">=3.5.0" +vcrpy = ">=2.0.1" + +[package.extras] +dev = ["pytest-recording[tests]"] +tests = ["pytest-httpbin", "pytest-mock", "requests", "werkzeug (==3.0.1)"] + +[[package]] +name = "pytest-xdist" +version = "3.5.0" +description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-xdist-3.5.0.tar.gz", hash = "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a"}, + {file = "pytest_xdist-3.5.0-py3-none-any.whl", hash = "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24"}, +] + +[package.dependencies] +execnet = ">=1.1" +pytest = ">=6.2.0" + +[package.extras] +psutil = ["psutil (>=3.0)"] +setproctitle = ["setproctitle"] +testing = ["filelock"] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = 
"sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-docx" +version = "1.1.0" +description = "Create, read, and update Microsoft Word .docx files." +optional = false +python-versions = ">=3.7" +files = [ + {file = "python-docx-1.1.0.tar.gz", hash = "sha256:5829b722141cf1ab79aedf0c34d9fe9924b29764584c0f2164eb2b02dcdf17c9"}, + {file = "python_docx-1.1.0-py3-none-any.whl", hash = "sha256:bac9773278098a1ddc43a52d84e22f5909c4a3080a624530b3ecb3771b07c6cd"}, +] + +[package.dependencies] +lxml = ">=3.1.0" +typing-extensions = "*" + +[[package]] +name = "python-dotenv" +version = "1.0.0" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"}, + {file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "python-multipart" +version = "0.0.7" +description = "A streaming multipart parser for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "python_multipart-0.0.7-py3-none-any.whl", hash = "sha256:b1fef9a53b74c795e2347daac8c54b252d9e0df9c619712691c1cc8021bd3c49"}, + {file = "python_multipart-0.0.7.tar.gz", hash = "sha256:288a6c39b06596c1b988bb6794c6fbc80e6c369e35e5062637df256bee0c9af9"}, +] + +[package.extras] +dev = ["atomicwrites (==1.2.1)", "attrs (==19.2.0)", "coverage (==6.5.0)", "hatch", "invoke (==2.2.0)", "more-itertools (==4.3.0)", "pbr (==4.3.0)", "pluggy (==1.0.0)", "py (==1.11.0)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-timeout (==2.1.0)", 
"pyyaml (==5.1)"] + +[[package]] +name = "pytz" +version = "2023.3.post1" +description = "World timezone definitions, modern and historical" +optional = true +python-versions = "*" +files = [ + {file = "pytz-2023.3.post1-py2.py3-none-any.whl", hash = "sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7"}, + {file = "pytz-2023.3.post1.tar.gz", hash = "sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b"}, +] + +[[package]] +name = "pyvis" +version = "0.3.2" +description = "A Python network graph visualization library" +optional = true +python-versions = ">3.6" +files = [ + {file = "pyvis-0.3.2-py3-none-any.whl", hash = "sha256:5720c4ca8161dc5d9ab352015723abb7a8bb8fb443edeb07f7a322db34a97555"}, +] + +[package.dependencies] +ipython = ">=5.3.0" +jinja2 = ">=2.9.6" +jsonpickle = ">=1.4.1" +networkx = ">=1.11" + +[[package]] +name = "pywin32" +version = "306" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +files = [ + {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, + {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, + {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, + {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, + {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, + {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, + {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, + {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = 
"sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, + {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, + {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, + {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, + {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, + {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, + {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = 
"PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = 
"sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "readability-lxml" +version = "0.8.1" +description = "fast html to text parser (article readability tool) with python 3 support" +optional = false +python-versions = "*" +files = [ + {file = "readability-lxml-0.8.1.tar.gz", hash = "sha256:e51fea56b5909aaf886d307d48e79e096293255afa567b7d08bca94d25b1a4e1"}, + {file = "readability_lxml-0.8.1-py3-none-any.whl", hash = "sha256:e0d366a21b1bd6cca17de71a4e6ea16fcfaa8b0a5b4004e39e2c7eff884e6305"}, +] + +[package.dependencies] +chardet = "*" +cssselect = "*" +lxml = "*" + +[package.extras] +test = ["timeout-decorator"] + +[[package]] +name = "redis" +version = "5.0.1" +description = "Python client for Redis database and key-value store" +optional = false +python-versions = ">=3.7" +files = [ + {file = "redis-5.0.1-py3-none-any.whl", hash = "sha256:ed4802971884ae19d640775ba3b03aa2e7bd5e8fb8dfaed2decce4d0fc48391f"}, + {file = "redis-5.0.1.tar.gz", hash = "sha256:0dab495cd5753069d3bc650a0dde8a8f9edde16fc5691b689a566eda58100d0f"}, +] + +[package.dependencies] +async-timeout = {version = ">=4.0.2", markers = "python_full_version <= \"3.11.2\""} + +[package.extras] +hiredis = ["hiredis (>=1.0.0)"] +ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"] + +[[package]] +name = "referencing" +version = "0.32.1" +description = "JSON Referencing + Python" +optional = false +python-versions = 
">=3.8" +files = [ + {file = "referencing-0.32.1-py3-none-any.whl", hash = "sha256:7e4dc12271d8e15612bfe35792f5ea1c40970dadf8624602e33db2758f7ee554"}, + {file = "referencing-0.32.1.tar.gz", hash = "sha256:3c57da0513e9563eb7e203ebe9bb3a1b509b042016433bd1e45a2853466c3dd3"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + +[[package]] +name = "regex" +version = "2023.12.25" +description = "Alternative regular expression module, to replace re." +optional = false +python-versions = ">=3.7" +files = [ + {file = "regex-2023.12.25-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0694219a1d54336fd0445ea382d49d36882415c0134ee1e8332afd1529f0baa5"}, + {file = "regex-2023.12.25-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b014333bd0217ad3d54c143de9d4b9a3ca1c5a29a6d0d554952ea071cff0f1f8"}, + {file = "regex-2023.12.25-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d865984b3f71f6d0af64d0d88f5733521698f6c16f445bb09ce746c92c97c586"}, + {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e0eabac536b4cc7f57a5f3d095bfa557860ab912f25965e08fe1545e2ed8b4c"}, + {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c25a8ad70e716f96e13a637802813f65d8a6760ef48672aa3502f4c24ea8b400"}, + {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9b6d73353f777630626f403b0652055ebfe8ff142a44ec2cf18ae470395766e"}, + {file = "regex-2023.12.25-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9cc99d6946d750eb75827cb53c4371b8b0fe89c733a94b1573c9dd16ea6c9e4"}, + {file = "regex-2023.12.25-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88d1f7bef20c721359d8675f7d9f8e414ec5003d8f642fdfd8087777ff7f94b5"}, + {file = "regex-2023.12.25-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:cb3fe77aec8f1995611f966d0c656fdce398317f850d0e6e7aebdfe61f40e1cd"}, + {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7aa47c2e9ea33a4a2a05f40fcd3ea36d73853a2aae7b4feab6fc85f8bf2c9704"}, + {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:df26481f0c7a3f8739fecb3e81bc9da3fcfae34d6c094563b9d4670b047312e1"}, + {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c40281f7d70baf6e0db0c2f7472b31609f5bc2748fe7275ea65a0b4601d9b392"}, + {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:d94a1db462d5690ebf6ae86d11c5e420042b9898af5dcf278bd97d6bda065423"}, + {file = "regex-2023.12.25-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ba1b30765a55acf15dce3f364e4928b80858fa8f979ad41f862358939bdd1f2f"}, + {file = "regex-2023.12.25-cp310-cp310-win32.whl", hash = "sha256:150c39f5b964e4d7dba46a7962a088fbc91f06e606f023ce57bb347a3b2d4630"}, + {file = "regex-2023.12.25-cp310-cp310-win_amd64.whl", hash = "sha256:09da66917262d9481c719599116c7dc0c321ffcec4b1f510c4f8a066f8768105"}, + {file = "regex-2023.12.25-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1b9d811f72210fa9306aeb88385b8f8bcef0dfbf3873410413c00aa94c56c2b6"}, + {file = "regex-2023.12.25-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d902a43085a308cef32c0d3aea962524b725403fd9373dea18110904003bac97"}, + {file = "regex-2023.12.25-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d166eafc19f4718df38887b2bbe1467a4f74a9830e8605089ea7a30dd4da8887"}, + {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7ad32824b7f02bb3c9f80306d405a1d9b7bb89362d68b3c5a9be53836caebdb"}, + {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:636ba0a77de609d6510235b7f0e77ec494d2657108f777e8765efc060094c98c"}, + {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:0fda75704357805eb953a3ee15a2b240694a9a514548cd49b3c5124b4e2ad01b"}, + {file = "regex-2023.12.25-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f72cbae7f6b01591f90814250e636065850c5926751af02bb48da94dfced7baa"}, + {file = "regex-2023.12.25-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db2a0b1857f18b11e3b0e54ddfefc96af46b0896fb678c85f63fb8c37518b3e7"}, + {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7502534e55c7c36c0978c91ba6f61703faf7ce733715ca48f499d3dbbd7657e0"}, + {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e8c7e08bb566de4faaf11984af13f6bcf6a08f327b13631d41d62592681d24fe"}, + {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:283fc8eed679758de38fe493b7d7d84a198b558942b03f017b1f94dda8efae80"}, + {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:f44dd4d68697559d007462b0a3a1d9acd61d97072b71f6d1968daef26bc744bd"}, + {file = "regex-2023.12.25-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:67d3ccfc590e5e7197750fcb3a2915b416a53e2de847a728cfa60141054123d4"}, + {file = "regex-2023.12.25-cp311-cp311-win32.whl", hash = "sha256:68191f80a9bad283432385961d9efe09d783bcd36ed35a60fb1ff3f1ec2efe87"}, + {file = "regex-2023.12.25-cp311-cp311-win_amd64.whl", hash = "sha256:7d2af3f6b8419661a0c421584cfe8aaec1c0e435ce7e47ee2a97e344b98f794f"}, + {file = "regex-2023.12.25-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8a0ccf52bb37d1a700375a6b395bff5dd15c50acb745f7db30415bae3c2b0715"}, + {file = "regex-2023.12.25-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c3c4a78615b7762740531c27cf46e2f388d8d727d0c0c739e72048beb26c8a9d"}, + {file = "regex-2023.12.25-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ad83e7545b4ab69216cef4cc47e344d19622e28aabec61574b20257c65466d6a"}, + {file = 
"regex-2023.12.25-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7a635871143661feccce3979e1727c4e094f2bdfd3ec4b90dfd4f16f571a87a"}, + {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d498eea3f581fbe1b34b59c697512a8baef88212f92e4c7830fcc1499f5b45a5"}, + {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:43f7cd5754d02a56ae4ebb91b33461dc67be8e3e0153f593c509e21d219c5060"}, + {file = "regex-2023.12.25-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51f4b32f793812714fd5307222a7f77e739b9bc566dc94a18126aba3b92b98a3"}, + {file = "regex-2023.12.25-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba99d8077424501b9616b43a2d208095746fb1284fc5ba490139651f971d39d9"}, + {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4bfc2b16e3ba8850e0e262467275dd4d62f0d045e0e9eda2bc65078c0110a11f"}, + {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8c2c19dae8a3eb0ea45a8448356ed561be843b13cbc34b840922ddf565498c1c"}, + {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:60080bb3d8617d96f0fb7e19796384cc2467447ef1c491694850ebd3670bc457"}, + {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b77e27b79448e34c2c51c09836033056a0547aa360c45eeeb67803da7b0eedaf"}, + {file = "regex-2023.12.25-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:518440c991f514331f4850a63560321f833979d145d7d81186dbe2f19e27ae3d"}, + {file = "regex-2023.12.25-cp312-cp312-win32.whl", hash = "sha256:e2610e9406d3b0073636a3a2e80db05a02f0c3169b5632022b4e81c0364bcda5"}, + {file = "regex-2023.12.25-cp312-cp312-win_amd64.whl", hash = "sha256:cc37b9aeebab425f11f27e5e9e6cf580be7206c6582a64467a14dda211abc232"}, + {file = "regex-2023.12.25-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:da695d75ac97cb1cd725adac136d25ca687da4536154cdc2815f576e4da11c69"}, + {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d126361607b33c4eb7b36debc173bf25d7805847346dd4d99b5499e1fef52bc7"}, + {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4719bb05094d7d8563a450cf8738d2e1061420f79cfcc1fa7f0a44744c4d8f73"}, + {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5dd58946bce44b53b06d94aa95560d0b243eb2fe64227cba50017a8d8b3cd3e2"}, + {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22a86d9fff2009302c440b9d799ef2fe322416d2d58fc124b926aa89365ec482"}, + {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2aae8101919e8aa05ecfe6322b278f41ce2994c4a430303c4cd163fef746e04f"}, + {file = "regex-2023.12.25-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e692296c4cc2873967771345a876bcfc1c547e8dd695c6b89342488b0ea55cd8"}, + {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:263ef5cc10979837f243950637fffb06e8daed7f1ac1e39d5910fd29929e489a"}, + {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:d6f7e255e5fa94642a0724e35406e6cb7001c09d476ab5fce002f652b36d0c39"}, + {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:88ad44e220e22b63b0f8f81f007e8abbb92874d8ced66f32571ef8beb0643b2b"}, + {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:3a17d3ede18f9cedcbe23d2daa8a2cd6f59fe2bf082c567e43083bba3fb00347"}, + {file = "regex-2023.12.25-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d15b274f9e15b1a0b7a45d2ac86d1f634d983ca40d6b886721626c47a400bf39"}, + {file = "regex-2023.12.25-cp37-cp37m-win32.whl", hash = 
"sha256:ed19b3a05ae0c97dd8f75a5d8f21f7723a8c33bbc555da6bbe1f96c470139d3c"}, + {file = "regex-2023.12.25-cp37-cp37m-win_amd64.whl", hash = "sha256:a6d1047952c0b8104a1d371f88f4ab62e6275567d4458c1e26e9627ad489b445"}, + {file = "regex-2023.12.25-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b43523d7bc2abd757119dbfb38af91b5735eea45537ec6ec3a5ec3f9562a1c53"}, + {file = "regex-2023.12.25-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:efb2d82f33b2212898f1659fb1c2e9ac30493ac41e4d53123da374c3b5541e64"}, + {file = "regex-2023.12.25-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7fca9205b59c1a3d5031f7e64ed627a1074730a51c2a80e97653e3e9fa0d415"}, + {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086dd15e9435b393ae06f96ab69ab2d333f5d65cbe65ca5a3ef0ec9564dfe770"}, + {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e81469f7d01efed9b53740aedd26085f20d49da65f9c1f41e822a33992cb1590"}, + {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:34e4af5b27232f68042aa40a91c3b9bb4da0eeb31b7632e0091afc4310afe6cb"}, + {file = "regex-2023.12.25-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9852b76ab558e45b20bf1893b59af64a28bd3820b0c2efc80e0a70a4a3ea51c1"}, + {file = "regex-2023.12.25-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff100b203092af77d1a5a7abe085b3506b7eaaf9abf65b73b7d6905b6cb76988"}, + {file = "regex-2023.12.25-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cc038b2d8b1470364b1888a98fd22d616fba2b6309c5b5f181ad4483e0017861"}, + {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:094ba386bb5c01e54e14434d4caabf6583334090865b23ef58e0424a6286d3dc"}, + {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:5cd05d0f57846d8ba4b71d9c00f6f37d6b97d5e5ef8b3c3840426a475c8f70f4"}, + {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:9aa1a67bbf0f957bbe096375887b2505f5d8ae16bf04488e8b0f334c36e31360"}, + {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:98a2636994f943b871786c9e82bfe7883ecdaba2ef5df54e1450fa9869d1f756"}, + {file = "regex-2023.12.25-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:37f8e93a81fc5e5bd8db7e10e62dc64261bcd88f8d7e6640aaebe9bc180d9ce2"}, + {file = "regex-2023.12.25-cp38-cp38-win32.whl", hash = "sha256:d78bd484930c1da2b9679290a41cdb25cc127d783768a0369d6b449e72f88beb"}, + {file = "regex-2023.12.25-cp38-cp38-win_amd64.whl", hash = "sha256:b521dcecebc5b978b447f0f69b5b7f3840eac454862270406a39837ffae4e697"}, + {file = "regex-2023.12.25-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f7bc09bc9c29ebead055bcba136a67378f03d66bf359e87d0f7c759d6d4ffa31"}, + {file = "regex-2023.12.25-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e14b73607d6231f3cc4622809c196b540a6a44e903bcfad940779c80dffa7be7"}, + {file = "regex-2023.12.25-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9eda5f7a50141291beda3edd00abc2d4a5b16c29c92daf8d5bd76934150f3edc"}, + {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc6bb9aa69aacf0f6032c307da718f61a40cf970849e471254e0e91c56ffca95"}, + {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:298dc6354d414bc921581be85695d18912bea163a8b23cac9a2562bbcd5088b1"}, + {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f4e475a80ecbd15896a976aa0b386c5525d0ed34d5c600b6d3ebac0a67c7ddf"}, + {file = "regex-2023.12.25-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:531ac6cf22b53e0696f8e1d56ce2396311254eb806111ddd3922c9d937151dae"}, + {file = 
"regex-2023.12.25-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22f3470f7524b6da61e2020672df2f3063676aff444db1daa283c2ea4ed259d6"}, + {file = "regex-2023.12.25-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:89723d2112697feaa320c9d351e5f5e7b841e83f8b143dba8e2d2b5f04e10923"}, + {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0ecf44ddf9171cd7566ef1768047f6e66975788258b1c6c6ca78098b95cf9a3d"}, + {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:905466ad1702ed4acfd67a902af50b8db1feeb9781436372261808df7a2a7bca"}, + {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:4558410b7a5607a645e9804a3e9dd509af12fb72b9825b13791a37cd417d73a5"}, + {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:7e316026cc1095f2a3e8cc012822c99f413b702eaa2ca5408a513609488cb62f"}, + {file = "regex-2023.12.25-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3b1de218d5375cd6ac4b5493e0b9f3df2be331e86520f23382f216c137913d20"}, + {file = "regex-2023.12.25-cp39-cp39-win32.whl", hash = "sha256:11a963f8e25ab5c61348d090bf1b07f1953929c13bd2309a0662e9ff680763c9"}, + {file = "regex-2023.12.25-cp39-cp39-win_amd64.whl", hash = "sha256:e693e233ac92ba83a87024e1d32b5f9ab15ca55ddd916d878146f4e3406b5c91"}, + {file = "regex-2023.12.25.tar.gz", hash = "sha256:29171aa128da69afdf4bde412d5bedc335f2ca8fcfe4489038577d05f16181e5"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-oauthlib" +version = "1.3.1" +description = "OAuthlib authentication support for Requests." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "requests-oauthlib-1.3.1.tar.gz", hash = "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a"}, + {file = "requests_oauthlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5"}, +] + +[package.dependencies] +oauthlib = ">=3.0.0" +requests = ">=2.0.0" + +[package.extras] +rsa = ["oauthlib[signedtoken] (>=3.0.0)"] + +[[package]] +name = "rpds-py" +version = "0.17.1" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.17.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:4128980a14ed805e1b91a7ed551250282a8ddf8201a4e9f8f5b7e6225f54170d"}, + {file = "rpds_py-0.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ff1dcb8e8bc2261a088821b2595ef031c91d499a0c1b031c152d43fe0a6ecec8"}, + {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d65e6b4f1443048eb7e833c2accb4fa7ee67cc7d54f31b4f0555b474758bee55"}, + {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a71169d505af63bb4d20d23a8fbd4c6ce272e7bce6cc31f617152aa784436f29"}, + {file = 
"rpds_py-0.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:436474f17733c7dca0fbf096d36ae65277e8645039df12a0fa52445ca494729d"}, + {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10162fe3f5f47c37ebf6d8ff5a2368508fe22007e3077bf25b9c7d803454d921"}, + {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:720215373a280f78a1814becb1312d4e4d1077b1202a56d2b0815e95ccb99ce9"}, + {file = "rpds_py-0.17.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:70fcc6c2906cfa5c6a552ba7ae2ce64b6c32f437d8f3f8eea49925b278a61453"}, + {file = "rpds_py-0.17.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:91e5a8200e65aaac342a791272c564dffcf1281abd635d304d6c4e6b495f29dc"}, + {file = "rpds_py-0.17.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:99f567dae93e10be2daaa896e07513dd4bf9c2ecf0576e0533ac36ba3b1d5394"}, + {file = "rpds_py-0.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:24e4900a6643f87058a27320f81336d527ccfe503984528edde4bb660c8c8d59"}, + {file = "rpds_py-0.17.1-cp310-none-win32.whl", hash = "sha256:0bfb09bf41fe7c51413f563373e5f537eaa653d7adc4830399d4e9bdc199959d"}, + {file = "rpds_py-0.17.1-cp310-none-win_amd64.whl", hash = "sha256:20de7b7179e2031a04042e85dc463a93a82bc177eeba5ddd13ff746325558aa6"}, + {file = "rpds_py-0.17.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:65dcf105c1943cba45d19207ef51b8bc46d232a381e94dd38719d52d3980015b"}, + {file = "rpds_py-0.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:01f58a7306b64e0a4fe042047dd2b7d411ee82e54240284bab63e325762c1147"}, + {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:071bc28c589b86bc6351a339114fb7a029f5cddbaca34103aa573eba7b482382"}, + {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:ae35e8e6801c5ab071b992cb2da958eee76340e6926ec693b5ff7d6381441745"}, + {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149c5cd24f729e3567b56e1795f74577aa3126c14c11e457bec1b1c90d212e38"}, + {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e796051f2070f47230c745d0a77a91088fbee2cc0502e9b796b9c6471983718c"}, + {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e820ee1004327609b28db8307acc27f5f2e9a0b185b2064c5f23e815f248f8"}, + {file = "rpds_py-0.17.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1957a2ab607f9added64478a6982742eb29f109d89d065fa44e01691a20fc20a"}, + {file = "rpds_py-0.17.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8587fd64c2a91c33cdc39d0cebdaf30e79491cc029a37fcd458ba863f8815383"}, + {file = "rpds_py-0.17.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4dc889a9d8a34758d0fcc9ac86adb97bab3fb7f0c4d29794357eb147536483fd"}, + {file = "rpds_py-0.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2953937f83820376b5979318840f3ee47477d94c17b940fe31d9458d79ae7eea"}, + {file = "rpds_py-0.17.1-cp311-none-win32.whl", hash = "sha256:1bfcad3109c1e5ba3cbe2f421614e70439f72897515a96c462ea657261b96518"}, + {file = "rpds_py-0.17.1-cp311-none-win_amd64.whl", hash = "sha256:99da0a4686ada4ed0f778120a0ea8d066de1a0a92ab0d13ae68492a437db78bf"}, + {file = "rpds_py-0.17.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1dc29db3900cb1bb40353772417800f29c3d078dbc8024fd64655a04ee3c4bdf"}, + {file = "rpds_py-0.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:82ada4a8ed9e82e443fcef87e22a3eed3654dd3adf6e3b3a0deb70f03e86142a"}, + {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d36b2b59e8cc6e576f8f7b671e32f2ff43153f0ad6d0201250a7c07f25d570e"}, + {file = 
"rpds_py-0.17.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3677fcca7fb728c86a78660c7fb1b07b69b281964673f486ae72860e13f512ad"}, + {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:516fb8c77805159e97a689e2f1c80655c7658f5af601c34ffdb916605598cda2"}, + {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df3b6f45ba4515632c5064e35ca7f31d51d13d1479673185ba8f9fefbbed58b9"}, + {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a967dd6afda7715d911c25a6ba1517975acd8d1092b2f326718725461a3d33f9"}, + {file = "rpds_py-0.17.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dbbb95e6fc91ea3102505d111b327004d1c4ce98d56a4a02e82cd451f9f57140"}, + {file = "rpds_py-0.17.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:02866e060219514940342a1f84303a1ef7a1dad0ac311792fbbe19b521b489d2"}, + {file = "rpds_py-0.17.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2528ff96d09f12e638695f3a2e0c609c7b84c6df7c5ae9bfeb9252b6fa686253"}, + {file = "rpds_py-0.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bd345a13ce06e94c753dab52f8e71e5252aec1e4f8022d24d56decd31e1b9b23"}, + {file = "rpds_py-0.17.1-cp312-none-win32.whl", hash = "sha256:2a792b2e1d3038daa83fa474d559acfd6dc1e3650ee93b2662ddc17dbff20ad1"}, + {file = "rpds_py-0.17.1-cp312-none-win_amd64.whl", hash = "sha256:292f7344a3301802e7c25c53792fae7d1593cb0e50964e7bcdcc5cf533d634e3"}, + {file = "rpds_py-0.17.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:8ffe53e1d8ef2520ebcf0c9fec15bb721da59e8ef283b6ff3079613b1e30513d"}, + {file = "rpds_py-0.17.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4341bd7579611cf50e7b20bb8c2e23512a3dc79de987a1f411cb458ab670eb90"}, + {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2f4eb548daf4836e3b2c662033bfbfc551db58d30fd8fe660314f86bf8510b93"}, + {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b686f25377f9c006acbac63f61614416a6317133ab7fafe5de5f7dc8a06d42eb"}, + {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4e21b76075c01d65d0f0f34302b5a7457d95721d5e0667aea65e5bb3ab415c25"}, + {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b86b21b348f7e5485fae740d845c65a880f5d1eda1e063bc59bef92d1f7d0c55"}, + {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f175e95a197f6a4059b50757a3dca33b32b61691bdbd22c29e8a8d21d3914cae"}, + {file = "rpds_py-0.17.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1701fc54460ae2e5efc1dd6350eafd7a760f516df8dbe51d4a1c79d69472fbd4"}, + {file = "rpds_py-0.17.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9051e3d2af8f55b42061603e29e744724cb5f65b128a491446cc029b3e2ea896"}, + {file = "rpds_py-0.17.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:7450dbd659fed6dd41d1a7d47ed767e893ba402af8ae664c157c255ec6067fde"}, + {file = "rpds_py-0.17.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5a024fa96d541fd7edaa0e9d904601c6445e95a729a2900c5aec6555fe921ed6"}, + {file = "rpds_py-0.17.1-cp38-none-win32.whl", hash = "sha256:da1ead63368c04a9bded7904757dfcae01eba0e0f9bc41d3d7f57ebf1c04015a"}, + {file = "rpds_py-0.17.1-cp38-none-win_amd64.whl", hash = "sha256:841320e1841bb53fada91c9725e766bb25009cfd4144e92298db296fb6c894fb"}, + {file = "rpds_py-0.17.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:f6c43b6f97209e370124baf2bf40bb1e8edc25311a158867eb1c3a5d449ebc7a"}, + {file = "rpds_py-0.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7d63ec01fe7c76c2dbb7e972fece45acbb8836e72682bde138e7e039906e2c"}, + {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:81038ff87a4e04c22e1d81f947c6ac46f122e0c80460b9006e6517c4d842a6ec"}, + {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:810685321f4a304b2b55577c915bece4c4a06dfe38f6e62d9cc1d6ca8ee86b99"}, + {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:25f071737dae674ca8937a73d0f43f5a52e92c2d178330b4c0bb6ab05586ffa6"}, + {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa5bfb13f1e89151ade0eb812f7b0d7a4d643406caaad65ce1cbabe0a66d695f"}, + {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfe07308b311a8293a0d5ef4e61411c5c20f682db6b5e73de6c7c8824272c256"}, + {file = "rpds_py-0.17.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a000133a90eea274a6f28adc3084643263b1e7c1a5a66eb0a0a7a36aa757ed74"}, + {file = "rpds_py-0.17.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d0e8a6434a3fbf77d11448c9c25b2f25244226cfbec1a5159947cac5b8c5fa4"}, + {file = "rpds_py-0.17.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:efa767c220d94aa4ac3a6dd3aeb986e9f229eaf5bce92d8b1b3018d06bed3772"}, + {file = "rpds_py-0.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:dbc56680ecf585a384fbd93cd42bc82668b77cb525343170a2d86dafaed2a84b"}, + {file = "rpds_py-0.17.1-cp39-none-win32.whl", hash = "sha256:270987bc22e7e5a962b1094953ae901395e8c1e1e83ad016c5cfcfff75a15a3f"}, + {file = "rpds_py-0.17.1-cp39-none-win_amd64.whl", hash = "sha256:2a7b2f2f56a16a6d62e55354dd329d929560442bd92e87397b7a9586a32e3e76"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a3264e3e858de4fc601741498215835ff324ff2482fd4e4af61b46512dd7fc83"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f2f3b28b40fddcb6c1f1f6c88c6f3769cd933fa493ceb79da45968a21dccc920"}, + {file = 
"rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9584f8f52010295a4a417221861df9bea4c72d9632562b6e59b3c7b87a1522b7"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c64602e8be701c6cfe42064b71c84ce62ce66ddc6422c15463fd8127db3d8066"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:060f412230d5f19fc8c8b75f315931b408d8ebf56aec33ef4168d1b9e54200b1"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9412abdf0ba70faa6e2ee6c0cc62a8defb772e78860cef419865917d86c7342"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9737bdaa0ad33d34c0efc718741abaafce62fadae72c8b251df9b0c823c63b22"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9f0e4dc0f17dcea4ab9d13ac5c666b6b5337042b4d8f27e01b70fae41dd65c57"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1db228102ab9d1ff4c64148c96320d0be7044fa28bd865a9ce628ce98da5973d"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:d8bbd8e56f3ba25a7d0cf980fc42b34028848a53a0e36c9918550e0280b9d0b6"}, + {file = "rpds_py-0.17.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:be22ae34d68544df293152b7e50895ba70d2a833ad9566932d750d3625918b82"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bf046179d011e6114daf12a534d874958b039342b347348a78b7cdf0dd9d6041"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:1a746a6d49665058a5896000e8d9d2f1a6acba8a03b389c1e4c06e11e0b7f40d"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0b8bf5b8db49d8fd40f54772a1dcf262e8be0ad2ab0206b5a2ec109c176c0a4"}, + {file = 
"rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f7f4cb1f173385e8a39c29510dd11a78bf44e360fb75610594973f5ea141028b"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7fbd70cb8b54fe745301921b0816c08b6d917593429dfc437fd024b5ba713c58"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bdf1303df671179eaf2cb41e8515a07fc78d9d00f111eadbe3e14262f59c3d0"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad059a4bd14c45776600d223ec194e77db6c20255578bb5bcdd7c18fd169361"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3664d126d3388a887db44c2e293f87d500c4184ec43d5d14d2d2babdb4c64cad"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:698ea95a60c8b16b58be9d854c9f993c639f5c214cf9ba782eca53a8789d6b19"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:c3d2010656999b63e628a3c694f23020322b4178c450dc478558a2b6ef3cb9bb"}, + {file = "rpds_py-0.17.1-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:938eab7323a736533f015e6069a7d53ef2dcc841e4e533b782c2bfb9fb12d84b"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e626b365293a2142a62b9a614e1f8e331b28f3ca57b9f05ebbf4cf2a0f0bdc5"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:380e0df2e9d5d5d339803cfc6d183a5442ad7ab3c63c2a0982e8c824566c5ccc"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b760a56e080a826c2e5af09002c1a037382ed21d03134eb6294812dda268c811"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5576ee2f3a309d2bb403ec292d5958ce03953b0e57a11d224c1f134feaf8c40f"}, + {file = 
"rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3c3461ebb4c4f1bbc70b15d20b565759f97a5aaf13af811fcefc892e9197ba"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:637b802f3f069a64436d432117a7e58fab414b4e27a7e81049817ae94de45d8d"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffee088ea9b593cc6160518ba9bd319b5475e5f3e578e4552d63818773c6f56a"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3ac732390d529d8469b831949c78085b034bff67f584559340008d0f6041a049"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:93432e747fb07fa567ad9cc7aaadd6e29710e515aabf939dfbed8046041346c6"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:7b7d9ca34542099b4e185b3c2a2b2eda2e318a7dbde0b0d83357a6d4421b5296"}, + {file = "rpds_py-0.17.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:0387ce69ba06e43df54e43968090f3626e231e4bc9150e4c3246947567695f68"}, + {file = "rpds_py-0.17.1.tar.gz", hash = "sha256:0210b2668f24c078307260bf88bdac9d6f1093635df5123789bfee4d8d7fc8e7"}, +] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "s3transfer" +version = "0.10.0" +description = "An Amazon S3 Transfer Manager" +optional = false +python-versions = ">= 3.8" +files = [ + {file = "s3transfer-0.10.0-py3-none-any.whl", hash = "sha256:3cdb40f5cfa6966e812209d0994f2a4709b561c88e90cf00c2696d2df4e56b2e"}, + {file = 
"s3transfer-0.10.0.tar.gz", hash = "sha256:d0c8bbf672d5eebbe4e57945e23b972d963f07d82f661cabf678a5c88831595b"}, +] + +[package.dependencies] +botocore = ">=1.33.2,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"] + +[[package]] +name = "selenium" +version = "4.16.0" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "selenium-4.16.0-py3-none-any.whl", hash = "sha256:aec71f4e6ed6cb3ec25c9c1b5ed56ae31b6da0a7f17474c7566d303f84e6219f"}, + {file = "selenium-4.16.0.tar.gz", hash = "sha256:b2e987a445306151f7be0e6dfe2aa72a479c2ac6a91b9d5ef2d6dd4e49ad0435"}, +] + +[package.dependencies] +certifi = ">=2021.10.8" +trio = ">=0.17,<1.0" +trio-websocket = ">=0.9,<1.0" +urllib3 = {version = ">=1.26,<3", extras = ["socks"]} + +[[package]] +name = "sentry-sdk" +version = "1.40.4" +description = "Python client for Sentry (https://sentry.io)" +optional = false +python-versions = "*" +files = [ + {file = "sentry-sdk-1.40.4.tar.gz", hash = "sha256:657abae98b0050a0316f0873d7149f951574ae6212f71d2e3a1c4c88f62d6456"}, + {file = "sentry_sdk-1.40.4-py2.py3-none-any.whl", hash = "sha256:ac5cf56bb897ec47135d239ddeedf7c1c12d406fb031a4c0caa07399ed014d7e"}, +] + +[package.dependencies] +certifi = "*" +urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""} + +[package.extras] +aiohttp = ["aiohttp (>=3.5)"] +arq = ["arq (>=0.23)"] +asyncpg = ["asyncpg (>=0.23)"] +beam = ["apache-beam (>=2.12)"] +bottle = ["bottle (>=0.12.13)"] +celery = ["celery (>=3)"] +chalice = ["chalice (>=1.16.0)"] +clickhouse-driver = ["clickhouse-driver (>=0.2.0)"] +django = ["django (>=1.8)"] +falcon = ["falcon (>=1.4)"] +fastapi = ["fastapi (>=0.79.0)"] +flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"] +grpcio = ["grpcio (>=1.21.1)"] +httpx = ["httpx (>=0.16.0)"] +huey = ["huey (>=2)"] +loguru = ["loguru (>=0.5)"] +opentelemetry = ["opentelemetry-distro (>=0.35b0)"] +opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", 
"opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"] +pure-eval = ["asttokens", "executing", "pure_eval"] +pymongo = ["pymongo (>=3.1)"] +pyspark = ["pyspark (>=2.4.4)"] +quart = ["blinker (>=1.1)", "quart (>=0.16.1)"] +rq = ["rq (>=0.6)"] +sanic = ["sanic (>=0.8)"] +sqlalchemy = ["sqlalchemy (>=1.2)"] +starlette = ["starlette (>=0.19.1)"] +starlite = ["starlite (>=1.48)"] +tornado = ["tornado (>=5)"] + +[[package]] +name = "setuptools" +version = "69.0.3" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-69.0.3-py3-none-any.whl", hash = "sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05"}, + {file = "setuptools-69.0.3.tar.gz", hash = "sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = 
["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "shellingham" +version = "1.5.4" +description = "Tool to Detect Surrounding Shell" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, + {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, +] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "smart-open" +version = "6.4.0" +description = "Utils for streaming large files (S3, HDFS, GCS, Azure Blob Storage, gzip, bz2...)" +optional = false +python-versions = ">=3.6,<4.0" +files = [ + {file = "smart_open-6.4.0-py3-none-any.whl", hash = "sha256:8d3ef7e6997e8e42dd55c74166ed21e6ac70664caa32dd940b26d54a8f6b4142"}, + {file = "smart_open-6.4.0.tar.gz", hash = "sha256:be3c92c246fbe80ebce8fbacb180494a481a77fcdcb7c1aadb2ea5b9c2bee8b9"}, +] + +[package.extras] +all = ["azure-common", "azure-core", "azure-storage-blob", "boto3", "google-cloud-storage (>=2.6.0)", "paramiko", "requests"] +azure = ["azure-common", "azure-core", "azure-storage-blob"] +gcs = ["google-cloud-storage (>=2.6.0)"] +http = ["requests"] +s3 = ["boto3"] +ssh = ["paramiko"] +test = ["azure-common", "azure-core", "azure-storage-blob", "boto3", "google-cloud-storage (>=2.6.0)", "moto[server]", "paramiko", "pytest", 
"pytest-rerunfailures", "requests", "responses"] +webhdfs = ["requests"] + +[[package]] +name = "smmap" +version = "5.0.1" +description = "A pure Python implementation of a sliding window memory map manager" +optional = false +python-versions = ">=3.7" +files = [ + {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, + {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, +] + +[[package]] +name = "sniffio" +version = "1.3.0" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, + {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, +] + +[[package]] +name = "sortedcontainers" +version = "2.4.0" +description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" +optional = false +python-versions = "*" +files = [ + {file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"}, + {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, +] + +[[package]] +name = "soupsieve" +version = "2.5" +description = "A modern CSS selector implementation for Beautiful Soup." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"}, + {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"}, +] + +[[package]] +name = "spacy" +version = "3.5.4" +description = "Industrial-strength Natural Language Processing (NLP) in Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "spacy-3.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39209f73508027a99ddf2a615ae99ceb6db84f9f10c0050c7dc0c78cd8d662e9"}, + {file = "spacy-3.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:abc2e347fa2217c97c602a591cd4202f3bea546e3beafe2b92dd4d2984b68299"}, + {file = "spacy-3.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d97294c588fcd05d0c644303dd54c8aa437bfd895b1c5e57f51ac0af8304181"}, + {file = "spacy-3.5.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e7992c6424fd28187064ee32c98998db6194d65e017e958993dd16f6953c1c1"}, + {file = "spacy-3.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:64cac9da114a2b98794a40e20ff2f8547dec01d44660c8d0dd64b2a5b32bf929"}, + {file = "spacy-3.5.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2796778a91f2d690864124a98f2fa4d3a82db6585244137d9283b4fbce21ef89"}, + {file = "spacy-3.5.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:97aea4aceb7d8a5a4183bad59957d6154d95e80d0b8a25690305fe5d4a8b8cb6"}, + {file = "spacy-3.5.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2aeb5f25ffb469c7c1f93a730c8810efe69ce65bb60318ae0e65b5106108df0c"}, + {file = "spacy-3.5.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0f7166d8f20c6332d0ed89a1bc32b3030f223c178cc26597b094190c853a7ed"}, + {file = "spacy-3.5.4-cp311-cp311-win_amd64.whl", hash = "sha256:35dec614492c849f6c6b29dc0a424502dc193f6775d4f55573ad7d8f55e06561"}, 
+ {file = "spacy-3.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0240874ed34d9e00df68cdbc3f1ca3741232233dc1194f24c18f73ae7dac7644"}, + {file = "spacy-3.5.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d1eb72163c8e8cb070bdafcfb8fb3c88f50a5b688500e8ef788fb4fb79e9997"}, + {file = "spacy-3.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:a4c7ba041aaffc9ecd0a3f9dff86f392939045221315f52e3044fe1453fc5d48"}, + {file = "spacy-3.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:61ab38c6732be402063f55b8b004b451b17dd20ccad966ab3abce9738e3859e4"}, + {file = "spacy-3.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b49807f1c47430f02365e7b0f25d2bddaaa917430e3dc3fbf0d60e0bffd5a06e"}, + {file = "spacy-3.5.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b59bdd41b372c52b639c6bb3b2e4d37cc5e6175b1d187f25c33a6b56c1d3d08c"}, + {file = "spacy-3.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ab802c2e06ba14556ea4c160309a8369fad4bd847895e341e8b0bfe7c0e1bfcf"}, + {file = "spacy-3.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:406d09abc7c061ce1f461311557495608e25be5fc405f6a840e14a9a044f84bd"}, + {file = "spacy-3.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0e9e0f9d95c6fbdc25f38e6d3bdad7d85723bcc8854333cc5f906d9a4db2b76a"}, + {file = "spacy-3.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1476db25cff811a43a19b79d12ce5b2a38dcbdc378fb9923f66aeb31c7f528c8"}, + {file = "spacy-3.5.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fff8986c3b9aa9b5a99a1ad57e842985f71b450102d1e102d4ac951f595688c"}, + {file = "spacy-3.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:d9b0d87f50a8e7592da2a7480956abd418ac143327b1c56244eca3c226c7332e"}, + {file = "spacy-3.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abf05e7f64c9136602ec7cec54ff616c79dd89634ded5575587c619da9367db9"}, + {file = 
"spacy-3.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c270d2b37e6896b7959d493e56ed4d37146d7eec732253c91f07379685c08dd6"}, + {file = "spacy-3.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af50c9838bf2ffa80397fb20f02127b0b66f1b26dcdcee86185292199c803041"}, + {file = "spacy-3.5.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed28a237c57f95a36b891d3b60773b8efb81f6c470f48fea7e4ec71adb8b85a5"}, + {file = "spacy-3.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:ad83768225e0ab2ee259ff5c1c759adb5c76649fb343ebd3bd777a3ec3742004"}, + {file = "spacy-3.5.4.tar.gz", hash = "sha256:9a9c167e9dcebfefacc75dac34a8e72becbe348eb45bbf06a6c0523ae05ac425"}, +] + +[package.dependencies] +catalogue = ">=2.0.6,<2.1.0" +cymem = ">=2.0.2,<2.1.0" +jinja2 = "*" +langcodes = ">=3.2.0,<4.0.0" +murmurhash = ">=0.28.0,<1.1.0" +numpy = ">=1.15.0" +packaging = ">=20.0" +pathy = ">=0.10.0" +preshed = ">=3.0.2,<3.1.0" +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<1.11.0" +requests = ">=2.13.0,<3.0.0" +setuptools = "*" +smart-open = ">=5.2.1,<7.0.0" +spacy-legacy = ">=3.0.11,<3.1.0" +spacy-loggers = ">=1.0.0,<2.0.0" +srsly = ">=2.4.3,<3.0.0" +thinc = ">=8.1.8,<8.2.0" +tqdm = ">=4.38.0,<5.0.0" +typer = ">=0.3.0,<0.10.0" +wasabi = ">=0.9.1,<1.2.0" + +[package.extras] +apple = ["thinc-apple-ops (>=0.1.0.dev0,<1.0.0)"] +cuda = ["cupy (>=5.0.0b4,<13.0.0)"] +cuda-autodetect = ["cupy-wheel (>=11.0.0,<13.0.0)"] +cuda100 = ["cupy-cuda100 (>=5.0.0b4,<13.0.0)"] +cuda101 = ["cupy-cuda101 (>=5.0.0b4,<13.0.0)"] +cuda102 = ["cupy-cuda102 (>=5.0.0b4,<13.0.0)"] +cuda110 = ["cupy-cuda110 (>=5.0.0b4,<13.0.0)"] +cuda111 = ["cupy-cuda111 (>=5.0.0b4,<13.0.0)"] +cuda112 = ["cupy-cuda112 (>=5.0.0b4,<13.0.0)"] +cuda113 = ["cupy-cuda113 (>=5.0.0b4,<13.0.0)"] +cuda114 = ["cupy-cuda114 (>=5.0.0b4,<13.0.0)"] +cuda115 = ["cupy-cuda115 (>=5.0.0b4,<13.0.0)"] +cuda116 = ["cupy-cuda116 (>=5.0.0b4,<13.0.0)"] +cuda117 = ["cupy-cuda117 (>=5.0.0b4,<13.0.0)"] +cuda11x = 
["cupy-cuda11x (>=11.0.0,<13.0.0)"] +cuda80 = ["cupy-cuda80 (>=5.0.0b4,<13.0.0)"] +cuda90 = ["cupy-cuda90 (>=5.0.0b4,<13.0.0)"] +cuda91 = ["cupy-cuda91 (>=5.0.0b4,<13.0.0)"] +cuda92 = ["cupy-cuda92 (>=5.0.0b4,<13.0.0)"] +ja = ["sudachidict-core (>=20211220)", "sudachipy (>=0.5.2,!=0.6.1)"] +ko = ["natto-py (>=0.9.0)"] +lookups = ["spacy-lookups-data (>=1.0.3,<1.1.0)"] +ray = ["spacy-ray (>=0.1.0,<1.0.0)"] +th = ["pythainlp (>=2.0)"] +transformers = ["spacy-transformers (>=1.1.2,<1.3.0)"] + +[[package]] +name = "spacy-legacy" +version = "3.0.12" +description = "Legacy registered functions for spaCy backwards compatibility" +optional = false +python-versions = ">=3.6" +files = [ + {file = "spacy-legacy-3.0.12.tar.gz", hash = "sha256:b37d6e0c9b6e1d7ca1cf5bc7152ab64a4c4671f59c85adaf7a3fcb870357a774"}, + {file = "spacy_legacy-3.0.12-py2.py3-none-any.whl", hash = "sha256:476e3bd0d05f8c339ed60f40986c07387c0a71479245d6d0f4298dbd52cda55f"}, +] + +[[package]] +name = "spacy-loggers" +version = "1.0.5" +description = "Logging utilities for SpaCy" +optional = false +python-versions = ">=3.6" +files = [ + {file = "spacy-loggers-1.0.5.tar.gz", hash = "sha256:d60b0bdbf915a60e516cc2e653baeff946f0cfc461b452d11a4d5458c6fe5f24"}, + {file = "spacy_loggers-1.0.5-py3-none-any.whl", hash = "sha256:196284c9c446cc0cdb944005384270d775fdeaf4f494d8e269466cfa497ef645"}, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.25" +description = "Database Abstraction Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "SQLAlchemy-2.0.25-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4344d059265cc8b1b1be351bfb88749294b87a8b2bbe21dfbe066c4199541ebd"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f9e2e59cbcc6ba1488404aad43de005d05ca56e069477b33ff74e91b6319735"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84daa0a2055df9ca0f148a64fdde12ac635e30edbca80e87df9b3aaf419e144a"}, + 
{file = "SQLAlchemy-2.0.25-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc8b7dabe8e67c4832891a5d322cec6d44ef02f432b4588390017f5cec186a84"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f5693145220517b5f42393e07a6898acdfe820e136c98663b971906120549da5"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db854730a25db7c956423bb9fb4bdd1216c839a689bf9cc15fada0a7fb2f4570"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-win32.whl", hash = "sha256:14a6f68e8fc96e5e8f5647ef6cda6250c780612a573d99e4d881581432ef1669"}, + {file = "SQLAlchemy-2.0.25-cp310-cp310-win_amd64.whl", hash = "sha256:87f6e732bccd7dcf1741c00f1ecf33797383128bd1c90144ac8adc02cbb98643"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:342d365988ba88ada8af320d43df4e0b13a694dbd75951f537b2d5e4cb5cd002"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f37c0caf14b9e9b9e8f6dbc81bc56db06acb4363eba5a633167781a48ef036ed"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa9373708763ef46782d10e950b49d0235bfe58facebd76917d3f5cbf5971aed"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d24f571990c05f6b36a396218f251f3e0dda916e0c687ef6fdca5072743208f5"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75432b5b14dc2fff43c50435e248b45c7cdadef73388e5610852b95280ffd0e9"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:884272dcd3ad97f47702965a0e902b540541890f468d24bd1d98bcfe41c3f018"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-win32.whl", hash = "sha256:e607cdd99cbf9bb80391f54446b86e16eea6ad309361942bf88318bcd452363c"}, + {file = "SQLAlchemy-2.0.25-cp311-cp311-win_amd64.whl", hash = "sha256:7d505815ac340568fd03f719446a589162d55c52f08abd77ba8964fbb7eb5b5f"}, + {file = 
"SQLAlchemy-2.0.25-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0dacf67aee53b16f365c589ce72e766efaabd2b145f9de7c917777b575e3659d"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b801154027107461ee992ff4b5c09aa7cc6ec91ddfe50d02bca344918c3265c6"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59a21853f5daeb50412d459cfb13cb82c089ad4c04ec208cd14dddd99fc23b39"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29049e2c299b5ace92cbed0c1610a7a236f3baf4c6b66eb9547c01179f638ec5"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b64b183d610b424a160b0d4d880995e935208fc043d0302dd29fee32d1ee3f95"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4f7a7d7fcc675d3d85fbf3b3828ecd5990b8d61bd6de3f1b260080b3beccf215"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-win32.whl", hash = "sha256:cf18ff7fc9941b8fc23437cc3e68ed4ebeff3599eec6ef5eebf305f3d2e9a7c2"}, + {file = "SQLAlchemy-2.0.25-cp312-cp312-win_amd64.whl", hash = "sha256:91f7d9d1c4dd1f4f6e092874c128c11165eafcf7c963128f79e28f8445de82d5"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:bb209a73b8307f8fe4fe46f6ad5979649be01607f11af1eb94aa9e8a3aaf77f0"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:798f717ae7c806d67145f6ae94dc7c342d3222d3b9a311a784f371a4333212c7"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd402169aa00df3142149940b3bf9ce7dde075928c1886d9a1df63d4b8de62"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0d3cab3076af2e4aa5693f89622bef7fa770c6fec967143e4da7508b3dceb9b9"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:74b080c897563f81062b74e44f5a72fa44c2b373741a9ade701d5f789a10ba23"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-win32.whl", hash = "sha256:87d91043ea0dc65ee583026cb18e1b458d8ec5fc0a93637126b5fc0bc3ea68c4"}, + {file = "SQLAlchemy-2.0.25-cp37-cp37m-win_amd64.whl", hash = "sha256:75f99202324383d613ddd1f7455ac908dca9c2dd729ec8584c9541dd41822a2c"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:420362338681eec03f53467804541a854617faed7272fe71a1bfdb07336a381e"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c88f0c7dcc5f99bdb34b4fd9b69b93c89f893f454f40219fe923a3a2fd11625"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3be4987e3ee9d9a380b66393b77a4cd6d742480c951a1c56a23c335caca4ce3"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a159111a0f58fb034c93eeba211b4141137ec4b0a6e75789ab7a3ef3c7e7e3"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8b8cb63d3ea63b29074dcd29da4dc6a97ad1349151f2d2949495418fd6e48db9"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:736ea78cd06de6c21ecba7416499e7236a22374561493b456a1f7ffbe3f6cdb4"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-win32.whl", hash = "sha256:10331f129982a19df4284ceac6fe87353ca3ca6b4ca77ff7d697209ae0a5915e"}, + {file = "SQLAlchemy-2.0.25-cp38-cp38-win_amd64.whl", hash = "sha256:c55731c116806836a5d678a70c84cb13f2cedba920212ba7dcad53260997666d"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:605b6b059f4b57b277f75ace81cc5bc6335efcbcc4ccb9066695e515dbdb3900"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:665f0a3954635b5b777a55111ababf44b4fc12b1f3ba0a435b602b6387ffd7cf"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ecf6d4cda1f9f6cb0b45803a01ea7f034e2f1aed9475e883410812d9f9e3cfcf"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c51db269513917394faec5e5c00d6f83829742ba62e2ac4fa5c98d58be91662f"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:790f533fa5c8901a62b6fef5811d48980adeb2f51f1290ade8b5e7ba990ba3de"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1b1180cda6df7af84fe72e4530f192231b1f29a7496951db4ff38dac1687202d"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-win32.whl", hash = "sha256:555651adbb503ac7f4cb35834c5e4ae0819aab2cd24857a123370764dc7d7e24"}, + {file = "SQLAlchemy-2.0.25-cp39-cp39-win_amd64.whl", hash = "sha256:dc55990143cbd853a5d038c05e79284baedf3e299661389654551bd02a6a68d7"}, + {file = "SQLAlchemy-2.0.25-py3-none-any.whl", hash = "sha256:a86b4240e67d4753dc3092d9511886795b3c2852abe599cffe108952f7af7ac3"}, + {file = "SQLAlchemy-2.0.25.tar.gz", hash = "sha256:a2c69a7664fb2d54b8682dd774c3b54f67f84fa123cf84dda2a5f40dcaa04e08"}, +] + +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +typing-extensions = ">=4.6.0" + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx_oracle (>=8)"] 
+oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3_binary"] + +[[package]] +name = "srsly" +version = "2.4.8" +description = "Modern high-performance serialization utilities for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "srsly-2.4.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:17f3bcb418bb4cf443ed3d4dcb210e491bd9c1b7b0185e6ab10b6af3271e63b2"}, + {file = "srsly-2.4.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0b070a58e21ab0e878fd949f932385abb4c53dd0acb6d3a7ee75d95d447bc609"}, + {file = "srsly-2.4.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98286d20014ed2067ad02b0be1e17c7e522255b188346e79ff266af51a54eb33"}, + {file = "srsly-2.4.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18685084e2e0cc47c25158cbbf3e44690e494ef77d6418c2aae0598c893f35b0"}, + {file = "srsly-2.4.8-cp310-cp310-win_amd64.whl", hash = "sha256:980a179cbf4eb5bc56f7507e53f76720d031bcf0cef52cd53c815720eb2fc30c"}, + {file = "srsly-2.4.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5472ed9f581e10c32e79424c996cf54c46c42237759f4224806a0cd4bb770993"}, + {file = "srsly-2.4.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:50f10afe9230072c5aad9f6636115ea99b32c102f4c61e8236d8642c73ec7a13"}, + {file = "srsly-2.4.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c994a89ba247a4d4f63ef9fdefb93aa3e1f98740e4800d5351ebd56992ac75e3"}, + {file = "srsly-2.4.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ace7ed4a0c20fa54d90032be32f9c656b6d75445168da78d14fe9080a0c208ad"}, + {file = 
"srsly-2.4.8-cp311-cp311-win_amd64.whl", hash = "sha256:7a919236a090fb93081fbd1cec030f675910f3863825b34a9afbcae71f643127"}, + {file = "srsly-2.4.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7583c03d114b4478b7a357a1915305163e9eac2dfe080da900555c975cca2a11"}, + {file = "srsly-2.4.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:94ccdd2f6db824c31266aaf93e0f31c1c43b8bc531cd2b3a1d924e3c26a4f294"}, + {file = "srsly-2.4.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db72d2974f91aee652d606c7def98744ca6b899bd7dd3009fd75ebe0b5a51034"}, + {file = "srsly-2.4.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a60c905fd2c15e848ce1fc315fd34d8a9cc72c1dee022a0d8f4c62991131307"}, + {file = "srsly-2.4.8-cp312-cp312-win_amd64.whl", hash = "sha256:e0b8d5722057000694edf105b8f492e7eb2f3aa6247a5f0c9170d1e0d074151c"}, + {file = "srsly-2.4.8-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:196b4261f9d6372d1d3d16d1216b90c7e370b4141471322777b7b3c39afd1210"}, + {file = "srsly-2.4.8-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4750017e6d78590b02b12653e97edd25aefa4734281386cc27501d59b7481e4e"}, + {file = "srsly-2.4.8-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa034cd582ba9e4a120c8f19efa263fcad0f10fc481e73fb8c0d603085f941c4"}, + {file = "srsly-2.4.8-cp36-cp36m-win_amd64.whl", hash = "sha256:5a78ab9e9d177ee8731e950feb48c57380036d462b49e3fb61a67ce529ff5f60"}, + {file = "srsly-2.4.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:087e36439af517e259843df93eb34bb9e2d2881c34fa0f541589bcfbc757be97"}, + {file = "srsly-2.4.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad141d8a130cb085a0ed3a6638b643e2b591cb98a4591996780597a632acfe20"}, + {file = "srsly-2.4.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24d05367b2571c0d08d00459636b951e3ca2a1e9216318c157331f09c33489d3"}, + {file = 
"srsly-2.4.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3fd661a1c4848deea2849b78f432a70c75d10968e902ca83c07c89c9b7050ab8"}, + {file = "srsly-2.4.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec37233fe39af97b00bf20dc2ceda04d39b9ea19ce0ee605e16ece9785e11f65"}, + {file = "srsly-2.4.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d2fd4bc081f1d6a6063396b6d97b00d98e86d9d3a3ac2949dba574a84e148080"}, + {file = "srsly-2.4.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7347cff1eb4ef3fc335d9d4acc89588051b2df43799e5d944696ef43da79c873"}, + {file = "srsly-2.4.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a9dc1da5cc94d77056b91ba38365c72ae08556b6345bef06257c7e9eccabafe"}, + {file = "srsly-2.4.8-cp38-cp38-win_amd64.whl", hash = "sha256:dc0bf7b6f23c9ecb49ec0924dc645620276b41e160e9b283ed44ca004c060d79"}, + {file = "srsly-2.4.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ff8df21d00d73c371bead542cefef365ee87ca3a5660de292444021ff84e3b8c"}, + {file = "srsly-2.4.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0ac3e340e65a9fe265105705586aa56054dc3902789fcb9a8f860a218d6c0a00"}, + {file = "srsly-2.4.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06d1733f4275eff4448e96521cc7dcd8fdabd68ba9b54ca012dcfa2690db2644"}, + {file = "srsly-2.4.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be5b751ad88fdb58fb73871d456248c88204f213aaa3c9aab49b6a1802b3fa8d"}, + {file = "srsly-2.4.8-cp39-cp39-win_amd64.whl", hash = "sha256:822a38b8cf112348f3accbc73274a94b7bf82515cb14a85ba586d126a5a72851"}, + {file = "srsly-2.4.8.tar.gz", hash = "sha256:b24d95a65009c2447e0b49cda043ac53fecf4f09e358d87a57446458f91b8a91"}, +] + +[package.dependencies] +catalogue = ">=2.0.3,<2.1.0" + +[[package]] +name = "stack-data" +version = "0.6.3" +description = "Extract data from python stack frames and tracebacks for informative displays" +optional = true +python-versions = "*" +files = [ + {file = 
"stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, + {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, +] + +[package.dependencies] +asttokens = ">=2.1.0" +executing = ">=1.2.0" +pure-eval = "*" + +[package.extras] +tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] + +[[package]] +name = "starlette" +version = "0.36.3" +description = "The little ASGI library that shines." +optional = false +python-versions = ">=3.8" +files = [ + {file = "starlette-0.36.3-py3-none-any.whl", hash = "sha256:13d429aa93a61dc40bf503e8c801db1f1bca3dc706b10ef2434a36123568f044"}, + {file = "starlette-0.36.3.tar.gz", hash = "sha256:90a671733cfb35771d8cc605e0b679d23b992f8dcfad48cc60b38cb29aeb7080"}, +] + +[package.dependencies] +anyio = ">=3.4.0,<5" + +[package.extras] +full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"] + +[[package]] +name = "sympy" +version = "1.12" +description = "Computer algebra system (CAS) in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, + {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, +] + +[package.dependencies] +mpmath = ">=0.19" + +[[package]] +name = "tabulate" +version = "0.9.0" +description = "Pretty-print tabular data" +optional = true +python-versions = ">=3.7" +files = [ + {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, + {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, +] + +[package.extras] +widechars = ["wcwidth"] + +[[package]] +name = "tenacity" +version = "8.2.3" +description = "Retry code until it succeeds" 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"}, + {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"}, +] + +[package.extras] +doc = ["reno", "sphinx", "tornado (>=4.5)"] + +[[package]] +name = "thinc" +version = "8.1.12" +description = "A refreshing functional take on deep learning, compatible with your favorite libraries" +optional = false +python-versions = ">=3.6" +files = [ + {file = "thinc-8.1.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:efda431bc1513e81e457dbff4ef1610592569ddc362f8df24422628b195d51f4"}, + {file = "thinc-8.1.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:01dbe9063171c1d0df29374a3857ee500fb8acf8f33bd8a85d11214d7453ff7a"}, + {file = "thinc-8.1.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fcfe97b80aa02a6cdeef9f5e3127822a13497a9b6f58653da4ff3caf321e3c4"}, + {file = "thinc-8.1.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c52d0657c61b7e1a382cb5ee1ee71692a0e9c47bef9f3e02ac3492b26056d27"}, + {file = "thinc-8.1.12-cp310-cp310-win_amd64.whl", hash = "sha256:b2078018c8bc36540b0c007cb1909f6c81c9a973b3180d15b934414f08988b28"}, + {file = "thinc-8.1.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:340171c1927592082c79509e5a964766e2d65c2e30c5e583489488935a9a2340"}, + {file = "thinc-8.1.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:88e8c9cd5119d5dbb0c4ed1bdde5acd6cf12fe1b3316647ecbd79fb12e3ef542"}, + {file = "thinc-8.1.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15c6cb31138814599426bd8855b9fc9d8d8ddb2bde1c91d204353b5e5af15deb"}, + {file = "thinc-8.1.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5dc3117db83ec0d423480b6c77de90f658dfaed5f7a2bbc3d640f1f6c7ff0fe7"}, + {file = 
"thinc-8.1.12-cp311-cp311-win_amd64.whl", hash = "sha256:f9ac43fd02e952c005753f85bd375c03baea5fa818a6a4942930177c31130eca"}, + {file = "thinc-8.1.12-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4241d0b8c9e813a1fbba05b6dc7d7056c0a2601b8a1119d372e85185068009e6"}, + {file = "thinc-8.1.12-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c141e42e610605a9c6def19e5dbb4877353839a610e3cdb1fa68e70f6b39492a"}, + {file = "thinc-8.1.12-cp36-cp36m-win_amd64.whl", hash = "sha256:9388c1427b4c3615967e1be19fa93427be61241392bdd5a84ab1da0f96c6bcfb"}, + {file = "thinc-8.1.12-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f6fb12692fae1a056432800f94ec88fa714eb1111aff9eabd61d2dfe10beb713"}, + {file = "thinc-8.1.12-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e51c693d477e02eab164a67b588fcdbb3609bc54ec39de6084da2dd9a356b8f8"}, + {file = "thinc-8.1.12-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4265f902f9a597be294765479ef6535d679e497fa2fed955cbcabcfdd82f81ad"}, + {file = "thinc-8.1.12-cp37-cp37m-win_amd64.whl", hash = "sha256:4586d6709f3811db85e192fdf519620b3326d28e5f0193cef8544b057e20a951"}, + {file = "thinc-8.1.12-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e10a648872e9ebbe115fa5fba0d515e8226bd0e2de0abd41d55f1ae04017813c"}, + {file = "thinc-8.1.12-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:27231eb1d468e7eb97f255c3d1e985d5a0cb8e309e0ec01b29cce2de836b8db2"}, + {file = "thinc-8.1.12-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8ece3880ac05d6bb75ecdbd9c03298e6f9691e5cb7480c1f15e66e33fe34004"}, + {file = "thinc-8.1.12-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:285f1141ecd7a9b61e2fed58b609c194b40e6ae5daf1e1e8dec31616bc9ffca1"}, + {file = "thinc-8.1.12-cp38-cp38-win_amd64.whl", hash = "sha256:0400632aa235cfbbc0004014e90cdf54cd42333aa7f5e971ffe87c8125e607ed"}, + {file = 
"thinc-8.1.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2edb3ef3a02f966eae8c5c56feb80ad5b6e5c221c94fcd95eb413d09d0d82212"}, + {file = "thinc-8.1.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e078d3b00e51c597f3f301d3e2925d0842d0725f251ff9a53a1e1b4110d4b9c1"}, + {file = "thinc-8.1.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d0ac2f6a0b38ddb913f9b31d8c4b13b98a7f5f62db211e0d8ebefbda5138757"}, + {file = "thinc-8.1.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47cde897cf54bc731a3a7c2e51a6ef01a86687ab7ae90ab0e9fc5d2294fe0fba"}, + {file = "thinc-8.1.12-cp39-cp39-win_amd64.whl", hash = "sha256:1b846c35a24b5b33e5d240f514f3a9e8bac2b6a10491caa147753dc50740a400"}, + {file = "thinc-8.1.12.tar.gz", hash = "sha256:9dd12c5c79b176f077ce9416b49c9752782bd76ff0ea649d66527882e83ea353"}, +] + +[package.dependencies] +blis = ">=0.7.8,<0.8.0" +catalogue = ">=2.0.4,<2.1.0" +confection = ">=0.0.1,<1.0.0" +cymem = ">=2.0.2,<2.1.0" +murmurhash = ">=1.0.2,<1.1.0" +numpy = {version = ">=1.19.0", markers = "python_version >= \"3.9\""} +packaging = ">=20.0" +preshed = ">=3.0.2,<3.1.0" +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<3.0.0" +setuptools = "*" +srsly = ">=2.4.0,<3.0.0" +wasabi = ">=0.8.1,<1.2.0" + +[package.extras] +cuda = ["cupy (>=5.0.0b4)"] +cuda-autodetect = ["cupy-wheel (>=11.0.0)"] +cuda100 = ["cupy-cuda100 (>=5.0.0b4)"] +cuda101 = ["cupy-cuda101 (>=5.0.0b4)"] +cuda102 = ["cupy-cuda102 (>=5.0.0b4)"] +cuda110 = ["cupy-cuda110 (>=5.0.0b4)"] +cuda111 = ["cupy-cuda111 (>=5.0.0b4)"] +cuda112 = ["cupy-cuda112 (>=5.0.0b4)"] +cuda113 = ["cupy-cuda113 (>=5.0.0b4)"] +cuda114 = ["cupy-cuda114 (>=5.0.0b4)"] +cuda115 = ["cupy-cuda115 (>=5.0.0b4)"] +cuda116 = ["cupy-cuda116 (>=5.0.0b4)"] +cuda117 = ["cupy-cuda117 (>=5.0.0b4)"] +cuda11x = ["cupy-cuda11x (>=11.0.0)"] +cuda80 = ["cupy-cuda80 (>=5.0.0b4)"] +cuda90 = ["cupy-cuda90 (>=5.0.0b4)"] +cuda91 = ["cupy-cuda91 (>=5.0.0b4)"] +cuda92 = ["cupy-cuda92 
(>=5.0.0b4)"] +datasets = ["ml-datasets (>=0.2.0,<0.3.0)"] +mxnet = ["mxnet (>=1.5.1,<1.6.0)"] +tensorflow = ["tensorflow (>=2.0.0,<2.6.0)"] +torch = ["torch (>=1.6.0)"] + +[[package]] +name = "tiktoken" +version = "0.5.2" +description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tiktoken-0.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8c4e654282ef05ec1bd06ead22141a9a1687991cef2c6a81bdd1284301abc71d"}, + {file = "tiktoken-0.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7b3134aa24319f42c27718c6967f3c1916a38a715a0fa73d33717ba121231307"}, + {file = "tiktoken-0.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6092e6e77730929c8c6a51bb0d7cfdf1b72b63c4d033d6258d1f2ee81052e9e5"}, + {file = "tiktoken-0.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72ad8ae2a747622efae75837abba59be6c15a8f31b4ac3c6156bc56ec7a8e631"}, + {file = "tiktoken-0.5.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51cba7c8711afa0b885445f0637f0fcc366740798c40b981f08c5f984e02c9d1"}, + {file = "tiktoken-0.5.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3d8c7d2c9313f8e92e987d585ee2ba0f7c40a0de84f4805b093b634f792124f5"}, + {file = "tiktoken-0.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:692eca18c5fd8d1e0dde767f895c17686faaa102f37640e884eecb6854e7cca7"}, + {file = "tiktoken-0.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:138d173abbf1ec75863ad68ca289d4da30caa3245f3c8d4bfb274c4d629a2f77"}, + {file = "tiktoken-0.5.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7388fdd684690973fdc450b47dfd24d7f0cbe658f58a576169baef5ae4658607"}, + {file = "tiktoken-0.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a114391790113bcff670c70c24e166a841f7ea8f47ee2fe0e71e08b49d0bf2d4"}, + {file = "tiktoken-0.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ca96f001e69f6859dd52926d950cfcc610480e920e576183497ab954e645e6ac"}, + {file = "tiktoken-0.5.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:15fed1dd88e30dfadcdd8e53a8927f04e1f6f81ad08a5ca824858a593ab476c7"}, + {file = "tiktoken-0.5.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:93f8e692db5756f7ea8cb0cfca34638316dcf0841fb8469de8ed7f6a015ba0b0"}, + {file = "tiktoken-0.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:bcae1c4c92df2ffc4fe9f475bf8148dbb0ee2404743168bbeb9dcc4b79dc1fdd"}, + {file = "tiktoken-0.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b76a1e17d4eb4357d00f0622d9a48ffbb23401dcf36f9716d9bd9c8e79d421aa"}, + {file = "tiktoken-0.5.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:01d8b171bb5df4035580bc26d4f5339a6fd58d06f069091899d4a798ea279d3e"}, + {file = "tiktoken-0.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42adf7d4fb1ed8de6e0ff2e794a6a15005f056a0d83d22d1d6755a39bffd9e7f"}, + {file = "tiktoken-0.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3f894dbe0adb44609f3d532b8ea10820d61fdcb288b325a458dfc60fefb7db"}, + {file = "tiktoken-0.5.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:58ccfddb4e62f0df974e8f7e34a667981d9bb553a811256e617731bf1d007d19"}, + {file = "tiktoken-0.5.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58902a8bad2de4268c2a701f1c844d22bfa3cbcc485b10e8e3e28a050179330b"}, + {file = "tiktoken-0.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:5e39257826d0647fcac403d8fa0a474b30d02ec8ffc012cfaf13083e9b5e82c5"}, + {file = "tiktoken-0.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8bde3b0fbf09a23072d39c1ede0e0821f759b4fa254a5f00078909158e90ae1f"}, + {file = "tiktoken-0.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2ddee082dcf1231ccf3a591d234935e6acf3e82ee28521fe99af9630bc8d2a60"}, + {file = "tiktoken-0.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:35c057a6a4e777b5966a7540481a75a31429fc1cb4c9da87b71c8b75b5143037"}, + {file = "tiktoken-0.5.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c4a049b87e28f1dc60509f8eb7790bc8d11f9a70d99b9dd18dfdd81a084ffe6"}, + {file = "tiktoken-0.5.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5bf5ce759089f4f6521ea6ed89d8f988f7b396e9f4afb503b945f5c949c6bec2"}, + {file = "tiktoken-0.5.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0c964f554af1a96884e01188f480dad3fc224c4bbcf7af75d4b74c4b74ae0125"}, + {file = "tiktoken-0.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:368dd5726d2e8788e47ea04f32e20f72a2012a8a67af5b0b003d1e059f1d30a3"}, + {file = "tiktoken-0.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a2deef9115b8cd55536c0a02c0203512f8deb2447f41585e6d929a0b878a0dd2"}, + {file = "tiktoken-0.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2ed7d380195affbf886e2f8b92b14edfe13f4768ff5fc8de315adba5b773815e"}, + {file = "tiktoken-0.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c76fce01309c8140ffe15eb34ded2bb94789614b7d1d09e206838fc173776a18"}, + {file = "tiktoken-0.5.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60a5654d6a2e2d152637dd9a880b4482267dfc8a86ccf3ab1cec31a8c76bfae8"}, + {file = "tiktoken-0.5.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:41d4d3228e051b779245a8ddd21d4336f8975563e92375662f42d05a19bdff41"}, + {file = "tiktoken-0.5.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c1cdec2c92fcde8c17a50814b525ae6a88e8e5b02030dc120b76e11db93f13"}, + {file = "tiktoken-0.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:84ddb36faedb448a50b246e13d1b6ee3437f60b7169b723a4b2abad75e914f3e"}, + {file = "tiktoken-0.5.2.tar.gz", hash = "sha256:f54c581f134a8ea96ce2023ab221d4d4d81ab614efa0b2fbce926387deb56c80"}, +] + +[package.dependencies] +regex = ">=2022.1.18" +requests = ">=2.26.0" + +[package.extras] +blobfile = ["blobfile (>=2)"] + +[[package]] +name = 
"tokenizers" +version = "0.15.0" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tokenizers-0.15.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:cd3cd0299aaa312cd2988957598f80becd04d5a07338741eca076057a2b37d6e"}, + {file = "tokenizers-0.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a922c492c721744ee175f15b91704be2d305569d25f0547c77cd6c9f210f9dc"}, + {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:331dd786d02fc38698f835fff61c99480f98b73ce75a4c65bd110c9af5e4609a"}, + {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88dd0961c437d413ab027f8b115350c121d49902cfbadf08bb8f634b15fa1814"}, + {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6fdcc55339df7761cd52e1fbe8185d3b3963bc9e3f3545faa6c84f9e8818259a"}, + {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1480b0051d8ab5408e8e4db2dc832f7082ea24aa0722c427bde2418c6f3bd07"}, + {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9855e6c258918f9cf62792d4f6ddfa6c56dccd8c8118640f867f6393ecaf8bd7"}, + {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de9529fe75efcd54ba8d516aa725e1851df9199f0669b665c55e90df08f5af86"}, + {file = "tokenizers-0.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8edcc90a36eab0705fe9121d6c77c6e42eeef25c7399864fd57dfb27173060bf"}, + {file = "tokenizers-0.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ae17884aafb3e94f34fb7cfedc29054f5f54e142475ebf8a265a4e388fee3f8b"}, + {file = "tokenizers-0.15.0-cp310-none-win32.whl", hash = "sha256:9a3241acdc9b44cff6e95c4a55b9be943ef3658f8edb3686034d353734adba05"}, + {file = "tokenizers-0.15.0-cp310-none-win_amd64.whl", hash = 
"sha256:4b31807cb393d6ea31926b307911c89a1209d5e27629aa79553d1599c8ffdefe"}, + {file = "tokenizers-0.15.0-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:af7e9be8c05d30bb137b9fd20f9d99354816599e5fd3d58a4b1e28ba3b36171f"}, + {file = "tokenizers-0.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c3d7343fa562ea29661783344a2d83662db0d3d17a6fa6a403cac8e512d2d9fd"}, + {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:32371008788aeeb0309a9244809a23e4c0259625e6b74a103700f6421373f395"}, + {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9db64c7c9954fbae698884c5bb089764edc549731e5f9b7fa1dd4e4d78d77f"}, + {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dbed5944c31195514669cf6381a0d8d47f164943000d10f93d6d02f0d45c25e0"}, + {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aab16c4a26d351d63e965b0c792f5da7227a37b69a6dc6d922ff70aa595b1b0c"}, + {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3c2b60b12fdd310bf85ce5d7d3f823456b9b65eed30f5438dd7761879c495983"}, + {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0344d6602740e44054a9e5bbe9775a5e149c4dddaff15959bb07dcce95a5a859"}, + {file = "tokenizers-0.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4525f6997d81d9b6d9140088f4f5131f6627e4c960c2c87d0695ae7304233fc3"}, + {file = "tokenizers-0.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:65975094fef8cc68919644936764efd2ce98cf1bacbe8db2687155d2b0625bee"}, + {file = "tokenizers-0.15.0-cp311-none-win32.whl", hash = "sha256:ff5d2159c5d93015f5a4542aac6c315506df31853123aa39042672031768c301"}, + {file = "tokenizers-0.15.0-cp311-none-win_amd64.whl", hash = "sha256:2dd681b53cf615e60a31a115a3fda3980e543d25ca183797f797a6c3600788a3"}, + {file = 
"tokenizers-0.15.0-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:c9cce6ee149a3d703f86877bc2a6d997e34874b2d5a2d7839e36b2273f31d3d9"}, + {file = "tokenizers-0.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a0a94bc3370e6f1cc8a07a8ae867ce13b7c1b4291432a773931a61f256d44ea"}, + {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:309cfcccfc7e502cb1f1de2c9c1c94680082a65bfd3a912d5a5b2c90c677eb60"}, + {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8413e994dd7d875ab13009127fc85633916c71213917daf64962bafd488f15dc"}, + {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d0ebf9430f901dbdc3dcb06b493ff24a3644c9f88c08e6a1d6d0ae2228b9b818"}, + {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10361e9c7864b22dd791ec5126327f6c9292fb1d23481d4895780688d5e298ac"}, + {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:babe42635b8a604c594bdc56d205755f73414fce17ba8479d142a963a6c25cbc"}, + {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3768829861e964c7a4556f5f23307fce6a23872c2ebf030eb9822dbbbf7e9b2a"}, + {file = "tokenizers-0.15.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9c91588a630adc88065e1c03ac6831e3e2112558869b9ebcb2b8afd8a14c944d"}, + {file = "tokenizers-0.15.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:77606994e793ca54ecf3a3619adc8a906a28ca223d9354b38df41cb8766a0ed6"}, + {file = "tokenizers-0.15.0-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:6fe143939f3b596681922b2df12a591a5b010e7dcfbee2202482cd0c1c2f2459"}, + {file = "tokenizers-0.15.0-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:b7bee0f1795e3e3561e9a557061b1539e5255b8221e3f928f58100282407e090"}, + {file = 
"tokenizers-0.15.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5d37e7f4439b4c46192ab4f2ff38ab815e4420f153caa13dec9272ef14403d34"}, + {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caadf255cf7f951b38d10097836d1f3bcff4aeaaffadfdf748bab780bf5bff95"}, + {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:05accb9162bf711a941b1460b743d62fec61c160daf25e53c5eea52c74d77814"}, + {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26a2ef890740127cb115ee5260878f4a677e36a12831795fd7e85887c53b430b"}, + {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e54c5f26df14913620046b33e822cb3bcd091a332a55230c0e63cc77135e2169"}, + {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:669b8ed653a578bcff919566631156f5da3aab84c66f3c0b11a6281e8b4731c7"}, + {file = "tokenizers-0.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0ea480d943297df26f06f508dab6e012b07f42bf3dffdd36e70799368a5f5229"}, + {file = "tokenizers-0.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bc80a0a565ebfc7cd89de7dd581da8c2b3238addfca6280572d27d763f135f2f"}, + {file = "tokenizers-0.15.0-cp37-none-win32.whl", hash = "sha256:cdd945e678bbdf4517d5d8de66578a5030aeefecdb46f5320b034de9cad8d4dd"}, + {file = "tokenizers-0.15.0-cp37-none-win_amd64.whl", hash = "sha256:1ab96ab7dc706e002c32b2ea211a94c1c04b4f4de48354728c3a6e22401af322"}, + {file = "tokenizers-0.15.0-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:f21c9eb71c9a671e2a42f18b456a3d118e50c7f0fc4dd9fa8f4eb727fea529bf"}, + {file = "tokenizers-0.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a5f4543a35889679fc3052086e69e81880b2a5a28ff2a52c5a604be94b77a3f"}, + {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:f8aa81afec893e952bd39692b2d9ef60575ed8c86fce1fd876a06d2e73e82dca"}, + {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1574a5a4af22c3def93fe8fe4adcc90a39bf5797ed01686a4c46d1c3bc677d2f"}, + {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c7982fd0ec9e9122d03b209dac48cebfea3de0479335100ef379a9a959b9a5a"}, + {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d16b647032df2ce2c1f9097236e046ea9fedd969b25637b9d5d734d78aa53b"}, + {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b3cdf29e6f9653da330515dc8fa414be5a93aae79e57f8acc50d4028dd843edf"}, + {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7286f3df10de840867372e3e64b99ef58c677210e3ceb653cd0e740a5c53fe78"}, + {file = "tokenizers-0.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aabc83028baa5a36ce7a94e7659250f0309c47fa4a639e5c2c38e6d5ea0de564"}, + {file = "tokenizers-0.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:72f78b0e0e276b1fc14a672fa73f3acca034ba8db4e782124a2996734a9ba9cf"}, + {file = "tokenizers-0.15.0-cp38-none-win32.whl", hash = "sha256:9680b0ecc26e7e42f16680c1aa62e924d58d1c2dd992707081cc10a374896ea2"}, + {file = "tokenizers-0.15.0-cp38-none-win_amd64.whl", hash = "sha256:f17cbd88dab695911cbdd385a5a7e3709cc61dff982351f5d1b5939f074a2466"}, + {file = "tokenizers-0.15.0-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:3661862df7382c5eb23ac4fbf7c75e69b02dc4f5784e4c5a734db406b5b24596"}, + {file = "tokenizers-0.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3045d191dad49647f5a5039738ecf1c77087945c7a295f7bcf051c37067e883"}, + {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9fcaad9ab0801f14457d7c820d9f246b5ab590c407fc6b073819b1573097aa7"}, + {file = 
"tokenizers-0.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a79f17027f24fe9485701c8dbb269b9c713954ec3bdc1e7075a66086c0c0cd3c"}, + {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:01a3aa332abc4bee7640563949fcfedca4de8f52691b3b70f2fc6ca71bfc0f4e"}, + {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05b83896a893cdfedad8785250daa3ba9f0504848323471524d4783d7291661e"}, + {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cbbf2489fcf25d809731ba2744ff278dd07d9eb3f8b7482726bd6cae607073a4"}, + {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab806ad521a5e9de38078b7add97589c313915f6f5fec6b2f9f289d14d607bd6"}, + {file = "tokenizers-0.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4a522612d5c88a41563e3463226af64e2fa00629f65cdcc501d1995dd25d23f5"}, + {file = "tokenizers-0.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e58a38c4e6075810bdfb861d9c005236a72a152ebc7005941cc90d1bbf16aca9"}, + {file = "tokenizers-0.15.0-cp39-none-win32.whl", hash = "sha256:b8034f1041fd2bd2b84ff9f4dc4ae2e1c3b71606820a9cd5c562ebd291a396d1"}, + {file = "tokenizers-0.15.0-cp39-none-win_amd64.whl", hash = "sha256:edde9aa964145d528d0e0dbf14f244b8a85ebf276fb76869bc02e2530fa37a96"}, + {file = "tokenizers-0.15.0-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:309445d10d442b7521b98083dc9f0b5df14eca69dbbfebeb98d781ee2cef5d30"}, + {file = "tokenizers-0.15.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d3125a6499226d4d48efc54f7498886b94c418e93a205b673bc59364eecf0804"}, + {file = "tokenizers-0.15.0-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ed56ddf0d54877bb9c6d885177db79b41576e61b5ef6defeb579dcb803c04ad5"}, + {file = 
"tokenizers-0.15.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b22cd714706cc5b18992a232b023f736e539495f5cc61d2d28d176e55046f6c"}, + {file = "tokenizers-0.15.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fac2719b1e9bc8e8e7f6599b99d0a8e24f33d023eb8ef644c0366a596f0aa926"}, + {file = "tokenizers-0.15.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85ddae17570ec7e5bfaf51ffa78d044f444a8693e1316e1087ee6150596897ee"}, + {file = "tokenizers-0.15.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:76f1bed992e396bf6f83e3df97b64ff47885e45e8365f8983afed8556a0bc51f"}, + {file = "tokenizers-0.15.0-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3bb0f4df6dce41a1c7482087b60d18c372ef4463cb99aa8195100fcd41e0fd64"}, + {file = "tokenizers-0.15.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:22c27672c27a059a5f39ff4e49feed8c7f2e1525577c8a7e3978bd428eb5869d"}, + {file = "tokenizers-0.15.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78104f5d035c9991f92831fc0efe9e64a05d4032194f2a69f67aaa05a4d75bbb"}, + {file = "tokenizers-0.15.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a40b73dc19d82c3e3ffb40abdaacca8fbc95eeb26c66b7f9f860aebc07a73998"}, + {file = "tokenizers-0.15.0-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d801d1368188c74552cd779b1286e67cb9fd96f4c57a9f9a2a09b6def9e1ab37"}, + {file = "tokenizers-0.15.0-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82641ffb13a4da1293fcc9f437d457647e60ed0385a9216cd135953778b3f0a1"}, + {file = "tokenizers-0.15.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:160f9d1810f2c18fffa94aa98bf17632f6bd2dabc67fcb01a698ca80c37d52ee"}, + {file = "tokenizers-0.15.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:8d7d6eea831ed435fdeeb9bcd26476226401d7309d115a710c65da4088841948"}, + {file = 
"tokenizers-0.15.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f6456bec6c557d63d8ec0023758c32f589e1889ed03c055702e84ce275488bed"}, + {file = "tokenizers-0.15.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1eef39a502fad3bf104b9e1906b4fb0cee20e44e755e51df9a98f8922c3bf6d4"}, + {file = "tokenizers-0.15.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1e4664c5b797e093c19b794bbecc19d2367e782b4a577d8b7c1821db5dc150d"}, + {file = "tokenizers-0.15.0-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ca003fb5f3995ff5cf676db6681b8ea5d54d3b30bea36af1120e78ee1a4a4cdf"}, + {file = "tokenizers-0.15.0-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7f17363141eb0c53752c89e10650b85ef059a52765d0802ba9613dbd2d21d425"}, + {file = "tokenizers-0.15.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:8a765db05581c7d7e1280170f2888cda351760d196cc059c37ea96f121125799"}, + {file = "tokenizers-0.15.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:2a0dd641a72604486cd7302dd8f87a12c8a9b45e1755e47d2682733f097c1af5"}, + {file = "tokenizers-0.15.0-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0a1a3c973e4dc97797fc19e9f11546c95278ffc55c4492acb742f69e035490bc"}, + {file = "tokenizers-0.15.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4fab75642aae4e604e729d6f78e0addb9d7e7d49e28c8f4d16b24da278e5263"}, + {file = "tokenizers-0.15.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65f80be77f6327a86d8fd35a4467adcfe6174c159b4ab52a1a8dd4c6f2d7d9e1"}, + {file = "tokenizers-0.15.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:a8da7533dbe66b88afd430c56a2f2ce1fd82e2681868f857da38eeb3191d7498"}, + {file = "tokenizers-0.15.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa8eb4584fc6cbe6a84d7a7864be3ed28e23e9fd2146aa8ef1814d579df91958"}, + {file = 
"tokenizers-0.15.0.tar.gz", hash = "sha256:10c7e6e7b4cabd757da59e93f5f8d1126291d16f8b54f28510825ef56a3e5d0e"}, +] + +[package.dependencies] +huggingface_hub = ">=0.16.4,<1.0" + +[package.extras] +dev = ["tokenizers[testing]"] +docs = ["setuptools_rust", "sphinx", "sphinx_rtd_theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] + +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tqdm" +version = "4.66.1" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"}, + {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "traitlets" +version = "5.14.1" +description = "Traitlets Python configuration system" +optional = true 
+python-versions = ">=3.8" +files = [ + {file = "traitlets-5.14.1-py3-none-any.whl", hash = "sha256:2e5a030e6eff91737c643231bfcf04a65b0132078dad75e4936700b213652e74"}, + {file = "traitlets-5.14.1.tar.gz", hash = "sha256:8585105b371a04b8316a43d5ce29c098575c2e477850b62b848b964f1444527e"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"] + +[[package]] +name = "trio" +version = "0.24.0" +description = "A friendly Python library for async concurrency and I/O" +optional = false +python-versions = ">=3.8" +files = [ + {file = "trio-0.24.0-py3-none-any.whl", hash = "sha256:c3bd3a4e3e3025cd9a2241eae75637c43fe0b9e88b4c97b9161a55b9e54cd72c"}, + {file = "trio-0.24.0.tar.gz", hash = "sha256:ffa09a74a6bf81b84f8613909fb0beaee84757450183a7a2e0b47b455c0cac5d"}, +] + +[package.dependencies] +attrs = ">=20.1.0" +cffi = {version = ">=1.14", markers = "os_name == \"nt\" and implementation_name != \"pypy\""} +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +idna = "*" +outcome = "*" +sniffio = ">=1.3.0" +sortedcontainers = "*" + +[[package]] +name = "trio-websocket" +version = "0.11.1" +description = "WebSocket library for Trio" +optional = false +python-versions = ">=3.7" +files = [ + {file = "trio-websocket-0.11.1.tar.gz", hash = "sha256:18c11793647703c158b1f6e62de638acada927344d534e3c7628eedcb746839f"}, + {file = "trio_websocket-0.11.1-py3-none-any.whl", hash = "sha256:520d046b0d030cf970b8b2b2e00c4c2245b3807853ecd44214acd33d74581638"}, +] + +[package.dependencies] +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +trio = ">=0.11" +wsproto = ">=0.14" + +[[package]] +name = "typer" +version = "0.9.0" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"}, + {file = "typer-0.9.0.tar.gz", hash = "sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2"}, +] + +[package.dependencies] +click = ">=7.1.1,<9.0.0" +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] +dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"] +doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"] +test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] + +[[package]] +name = "types-awscrt" +version = "0.20.0" +description = "Type annotations and code completion for awscrt" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "types_awscrt-0.20.0-py3-none-any.whl", hash = "sha256:e872b65d041687ec7fb49fb4dcb871ff10ade5efeca02722e037a03bff81db7e"}, + {file = "types_awscrt-0.20.0.tar.gz", hash = "sha256:99778c952e1eae10cc7a53468413001177026c9434345bf00120bb2ea5b79109"}, +] + +[[package]] +name = "types-beautifulsoup4" +version = "4.12.0.20240106" +description = "Typing stubs for beautifulsoup4" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-beautifulsoup4-4.12.0.20240106.tar.gz", hash = "sha256:98d628985b71b140bd3bc22a8cb0ab603c2f2d08f20d37925965eb4a21739be8"}, + {file = "types_beautifulsoup4-4.12.0.20240106-py3-none-any.whl", hash = "sha256:cbdd60ab8aeac737ac014431b6e921b43e84279c0405fdd25a6900bb0e71da5b"}, +] + +[package.dependencies] 
+types-html5lib = "*" + +[[package]] +name = "types-colorama" +version = "0.4.15.20240106" +description = "Typing stubs for colorama" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-colorama-0.4.15.20240106.tar.gz", hash = "sha256:49096b4c4cbfcaa11699a0470c36e4f5631f193fb980188e013ea64445d35656"}, + {file = "types_colorama-0.4.15.20240106-py3-none-any.whl", hash = "sha256:18294bc18f60dc0b4895de8119964a5d895f5e180c2d1308fdd33009c0fa0f38"}, +] + +[[package]] +name = "types-html5lib" +version = "1.1.11.20240106" +description = "Typing stubs for html5lib" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-html5lib-1.1.11.20240106.tar.gz", hash = "sha256:fc3a1b18eb601b3eeaf92c900bd67675c0a4fa1dd1d2a2893ebdb46923547ee9"}, + {file = "types_html5lib-1.1.11.20240106-py3-none-any.whl", hash = "sha256:61993cb89220107481e0f1da65c388ff8cf3d8c5f6e8483c97559639a596b697"}, +] + +[[package]] +name = "types-markdown" +version = "3.5.0.20240106" +description = "Typing stubs for Markdown" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-Markdown-3.5.0.20240106.tar.gz", hash = "sha256:be47d35cbe61d458bd17aec127f1da233cd6ed96fa9a131c710378a4e8857030"}, + {file = "types_Markdown-3.5.0.20240106-py3-none-any.whl", hash = "sha256:c23569d33718475dfae25c0036c6e6866f409e7077ee8a0728ab3db263d8e4a5"}, +] + +[[package]] +name = "types-pillow" +version = "10.2.0.20240111" +description = "Typing stubs for Pillow" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-Pillow-10.2.0.20240111.tar.gz", hash = "sha256:e8d359bfdc5a149a3c90a7e153cb2d0750ddf7fc3508a20dfadabd8a9435e354"}, + {file = "types_Pillow-10.2.0.20240111-py3-none-any.whl", hash = "sha256:1f4243b30c143b56b0646626f052e4269123e550f9096cdfb5fbd999daee7dbb"}, +] + +[[package]] +name = "types-requests" +version = "2.31.0.6" +description = "Typing stubs for requests" +optional = true +python-versions = ">=3.7" +files = [ + {file = 
"types-requests-2.31.0.6.tar.gz", hash = "sha256:cd74ce3b53c461f1228a9b783929ac73a666658f223e28ed29753771477b3bd0"}, + {file = "types_requests-2.31.0.6-py3-none-any.whl", hash = "sha256:a2db9cb228a81da8348b49ad6db3f5519452dd20a9c1e1a868c83c5fe88fd1a9"}, +] + +[package.dependencies] +types-urllib3 = "*" + +[[package]] +name = "types-s3transfer" +version = "0.10.0" +description = "Type annotations and code completion for s3transfer" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "types_s3transfer-0.10.0-py3-none-any.whl", hash = "sha256:44fcdf0097b924a9aab1ee4baa1179081a9559ca62a88c807e2b256893ce688f"}, + {file = "types_s3transfer-0.10.0.tar.gz", hash = "sha256:35e4998c25df7f8985ad69dedc8e4860e8af3b43b7615e940d53c00d413bdc69"}, +] + +[[package]] +name = "types-urllib3" +version = "1.26.25.14" +description = "Typing stubs for urllib3" +optional = true +python-versions = "*" +files = [ + {file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"}, + {file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"}, +] + +[[package]] +name = "typing-extensions" +version = "4.9.0" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"}, + {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"}, +] + +[[package]] +name = "tzdata" +version = "2023.4" +description = "Provider of IANA time zone data" +optional = true +python-versions = ">=2" +files = [ + {file = "tzdata-2023.4-py2.py3-none-any.whl", hash = "sha256:aa3ace4329eeacda5b7beb7ea08ece826c28d761cda36e747cfbf97996d39bf3"}, + {file = "tzdata-2023.4.tar.gz", hash = 
"sha256:dd54c94f294765522c77399649b4fefd95522479a664a0cec87f41bebc6148c9"}, +] + +[[package]] +name = "uritemplate" +version = "4.1.1" +description = "Implementation of RFC 6570 URI Templates" +optional = false +python-versions = ">=3.6" +files = [ + {file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"}, + {file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"}, +] + +[[package]] +name = "urllib3" +version = "1.26.18" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"}, + {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"}, +] + +[package.dependencies] +PySocks = {version = ">=1.5.6,<1.5.7 || >1.5.7,<2.0", optional = true, markers = "extra == \"socks\""} + +[package.extras] +brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "uvicorn" +version = "0.23.2" +description = "The lightning-fast ASGI server." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "uvicorn-0.23.2-py3-none-any.whl", hash = "sha256:1f9be6558f01239d4fdf22ef8126c39cb1ad0addf76c40e760549d2c2f43ab53"}, + {file = "uvicorn-0.23.2.tar.gz", hash = "sha256:4d3cc12d7727ba72b64d12d3cc7743124074c0a69f7b201512fc50c3e3f1569a"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win32\" and extra == \"standard\""} +h11 = ">=0.8" +httptools = {version = ">=0.5.0", optional = true, markers = "extra == \"standard\""} +python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} +uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""} +watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""} + +[package.extras] +standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] + +[[package]] +name = "uvloop" +version = "0.19.0" +description = "Fast implementation of asyncio event loop on top of libuv" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "uvloop-0.19.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:de4313d7f575474c8f5a12e163f6d89c0a878bc49219641d49e6f1444369a90e"}, + {file = "uvloop-0.19.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5588bd21cf1fcf06bded085f37e43ce0e00424197e7c10e77afd4bbefffef428"}, + {file = "uvloop-0.19.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash 
= "sha256:7b1fd71c3843327f3bbc3237bedcdb6504fd50368ab3e04d0410e52ec293f5b8"}, + {file = "uvloop-0.19.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a05128d315e2912791de6088c34136bfcdd0c7cbc1cf85fd6fd1bb321b7c849"}, + {file = "uvloop-0.19.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cd81bdc2b8219cb4b2556eea39d2e36bfa375a2dd021404f90a62e44efaaf957"}, + {file = "uvloop-0.19.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f17766fb6da94135526273080f3455a112f82570b2ee5daa64d682387fe0dcd"}, + {file = "uvloop-0.19.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4ce6b0af8f2729a02a5d1575feacb2a94fc7b2e983868b009d51c9a9d2149bef"}, + {file = "uvloop-0.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:31e672bb38b45abc4f26e273be83b72a0d28d074d5b370fc4dcf4c4eb15417d2"}, + {file = "uvloop-0.19.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:570fc0ed613883d8d30ee40397b79207eedd2624891692471808a95069a007c1"}, + {file = "uvloop-0.19.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5138821e40b0c3e6c9478643b4660bd44372ae1e16a322b8fc07478f92684e24"}, + {file = "uvloop-0.19.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:91ab01c6cd00e39cde50173ba4ec68a1e578fee9279ba64f5221810a9e786533"}, + {file = "uvloop-0.19.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:47bf3e9312f63684efe283f7342afb414eea4d3011542155c7e625cd799c3b12"}, + {file = "uvloop-0.19.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:da8435a3bd498419ee8c13c34b89b5005130a476bda1d6ca8cfdde3de35cd650"}, + {file = "uvloop-0.19.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:02506dc23a5d90e04d4f65c7791e65cf44bd91b37f24cfc3ef6cf2aff05dc7ec"}, + {file = "uvloop-0.19.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2693049be9d36fef81741fddb3f441673ba12a34a704e7b4361efb75cf30befc"}, + {file = 
"uvloop-0.19.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7010271303961c6f0fe37731004335401eb9075a12680738731e9c92ddd96ad6"}, + {file = "uvloop-0.19.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5daa304d2161d2918fa9a17d5635099a2f78ae5b5960e742b2fcfbb7aefaa593"}, + {file = "uvloop-0.19.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7207272c9520203fea9b93843bb775d03e1cf88a80a936ce760f60bb5add92f3"}, + {file = "uvloop-0.19.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:78ab247f0b5671cc887c31d33f9b3abfb88d2614b84e4303f1a63b46c046c8bd"}, + {file = "uvloop-0.19.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:472d61143059c84947aa8bb74eabbace30d577a03a1805b77933d6bd13ddebbd"}, + {file = "uvloop-0.19.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45bf4c24c19fb8a50902ae37c5de50da81de4922af65baf760f7c0c42e1088be"}, + {file = "uvloop-0.19.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271718e26b3e17906b28b67314c45d19106112067205119dddbd834c2b7ce797"}, + {file = "uvloop-0.19.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:34175c9fd2a4bc3adc1380e1261f60306344e3407c20a4d684fd5f3be010fa3d"}, + {file = "uvloop-0.19.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e27f100e1ff17f6feeb1f33968bc185bf8ce41ca557deee9d9bbbffeb72030b7"}, + {file = "uvloop-0.19.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13dfdf492af0aa0a0edf66807d2b465607d11c4fa48f4a1fd41cbea5b18e8e8b"}, + {file = "uvloop-0.19.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e3d4e85ac060e2342ff85e90d0c04157acb210b9ce508e784a944f852a40e67"}, + {file = "uvloop-0.19.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ca4956c9ab567d87d59d49fa3704cf29e37109ad348f2d5223c9bf761a332e7"}, + {file = "uvloop-0.19.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f467a5fd23b4fc43ed86342641f3936a68ded707f4627622fa3f82a120e18256"}, + 
{file = "uvloop-0.19.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:492e2c32c2af3f971473bc22f086513cedfc66a130756145a931a90c3958cb17"}, + {file = "uvloop-0.19.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2df95fca285a9f5bfe730e51945ffe2fa71ccbfdde3b0da5772b4ee4f2e770d5"}, + {file = "uvloop-0.19.0.tar.gz", hash = "sha256:0246f4fd1bf2bf702e06b0d45ee91677ee5c31242f39aab4ea6fe0c51aedd0fd"}, +] + +[package.extras] +docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] +test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"] + +[[package]] +name = "vcrpy" +version = "5.1.0" +description = "Automatically mock your HTTP interactions to simplify and speed up testing" +optional = false +python-versions = ">=3.8" +files = [] +develop = false + +[package.dependencies] +PyYAML = "*" +wrapt = "*" +yarl = "*" + +[package.source] +type = "git" +url = "https://github.com/Significant-Gravitas/vcrpy.git" +reference = "master" +resolved_reference = "bfd15f9d06a516138b673cb481547f3352d9cc43" + +[[package]] +name = "virtualenv" +version = "20.25.0" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.7" +files = [ + {file = "virtualenv-20.25.0-py3-none-any.whl", hash = "sha256:4238949c5ffe6876362d9c0180fc6c3a824a7b12b80604eeb8085f2ed7460de3"}, + {file = "virtualenv-20.25.0.tar.gz", hash = "sha256:bf51c0d9c7dd63ea8e44086fa1e4fb1093a31e963b86959257378aef020e1f1b"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess 
(>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] + +[[package]] +name = "wasabi" +version = "1.1.2" +description = "A lightweight console printing and formatting toolkit" +optional = false +python-versions = ">=3.6" +files = [ + {file = "wasabi-1.1.2-py3-none-any.whl", hash = "sha256:0a3f933c4bf0ed3f93071132c1b87549733256d6c8de6473c5f7ed2e171b5cf9"}, + {file = "wasabi-1.1.2.tar.gz", hash = "sha256:1aaef3aceaa32edb9c91330d29d3936c0c39fdb965743549c173cb54b16c30b5"}, +] + +[package.dependencies] +colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\" and python_version >= \"3.7\""} + +[[package]] +name = "watchfiles" +version = "0.21.0" +description = "Simple, modern and high performance file watching and code reload in python." +optional = false +python-versions = ">=3.8" +files = [ + {file = "watchfiles-0.21.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:27b4035013f1ea49c6c0b42d983133b136637a527e48c132d368eb19bf1ac6aa"}, + {file = "watchfiles-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c81818595eff6e92535ff32825f31c116f867f64ff8cdf6562cd1d6b2e1e8f3e"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6c107ea3cf2bd07199d66f156e3ea756d1b84dfd43b542b2d870b77868c98c03"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d9ac347653ebd95839a7c607608703b20bc07e577e870d824fa4801bc1cb124"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5eb86c6acb498208e7663ca22dbe68ca2cf42ab5bf1c776670a50919a56e64ab"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f564bf68404144ea6b87a78a3f910cc8de216c6b12a4cf0b27718bf4ec38d303"}, + {file = 
"watchfiles-0.21.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d0f32ebfaa9c6011f8454994f86108c2eb9c79b8b7de00b36d558cadcedaa3d"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6d45d9b699ecbac6c7bd8e0a2609767491540403610962968d258fd6405c17c"}, + {file = "watchfiles-0.21.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:aff06b2cac3ef4616e26ba17a9c250c1fe9dd8a5d907d0193f84c499b1b6e6a9"}, + {file = "watchfiles-0.21.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d9792dff410f266051025ecfaa927078b94cc7478954b06796a9756ccc7e14a9"}, + {file = "watchfiles-0.21.0-cp310-none-win32.whl", hash = "sha256:214cee7f9e09150d4fb42e24919a1e74d8c9b8a9306ed1474ecaddcd5479c293"}, + {file = "watchfiles-0.21.0-cp310-none-win_amd64.whl", hash = "sha256:1ad7247d79f9f55bb25ab1778fd47f32d70cf36053941f07de0b7c4e96b5d235"}, + {file = "watchfiles-0.21.0-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:668c265d90de8ae914f860d3eeb164534ba2e836811f91fecc7050416ee70aa7"}, + {file = "watchfiles-0.21.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a23092a992e61c3a6a70f350a56db7197242f3490da9c87b500f389b2d01eef"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e7941bbcfdded9c26b0bf720cb7e6fd803d95a55d2c14b4bd1f6a2772230c586"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11cd0c3100e2233e9c53106265da31d574355c288e15259c0d40a4405cbae317"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d78f30cbe8b2ce770160d3c08cff01b2ae9306fe66ce899b73f0409dc1846c1b"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6674b00b9756b0af620aa2a3346b01f8e2a3dc729d25617e1b89cf6af4a54eb1"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:fd7ac678b92b29ba630d8c842d8ad6c555abda1b9ef044d6cc092dacbfc9719d"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c873345680c1b87f1e09e0eaf8cf6c891b9851d8b4d3645e7efe2ec20a20cc7"}, + {file = "watchfiles-0.21.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:49f56e6ecc2503e7dbe233fa328b2be1a7797d31548e7a193237dcdf1ad0eee0"}, + {file = "watchfiles-0.21.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:02d91cbac553a3ad141db016e3350b03184deaafeba09b9d6439826ee594b365"}, + {file = "watchfiles-0.21.0-cp311-none-win32.whl", hash = "sha256:ebe684d7d26239e23d102a2bad2a358dedf18e462e8808778703427d1f584400"}, + {file = "watchfiles-0.21.0-cp311-none-win_amd64.whl", hash = "sha256:4566006aa44cb0d21b8ab53baf4b9c667a0ed23efe4aaad8c227bfba0bf15cbe"}, + {file = "watchfiles-0.21.0-cp311-none-win_arm64.whl", hash = "sha256:c550a56bf209a3d987d5a975cdf2063b3389a5d16caf29db4bdddeae49f22078"}, + {file = "watchfiles-0.21.0-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:51ddac60b96a42c15d24fbdc7a4bfcd02b5a29c047b7f8bf63d3f6f5a860949a"}, + {file = "watchfiles-0.21.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:511f0b034120cd1989932bf1e9081aa9fb00f1f949fbd2d9cab6264916ae89b1"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cfb92d49dbb95ec7a07511bc9efb0faff8fe24ef3805662b8d6808ba8409a71a"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f92944efc564867bbf841c823c8b71bb0be75e06b8ce45c084b46411475a915"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:642d66b75eda909fd1112d35c53816d59789a4b38c141a96d62f50a3ef9b3360"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d23bcd6c8eaa6324fe109d8cac01b41fe9a54b8c498af9ce464c1aeeb99903d6"}, + {file = 
"watchfiles-0.21.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18d5b4da8cf3e41895b34e8c37d13c9ed294954907929aacd95153508d5d89d7"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b8d1eae0f65441963d805f766c7e9cd092f91e0c600c820c764a4ff71a0764c"}, + {file = "watchfiles-0.21.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1fd9a5205139f3c6bb60d11f6072e0552f0a20b712c85f43d42342d162be1235"}, + {file = "watchfiles-0.21.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a1e3014a625bcf107fbf38eece0e47fa0190e52e45dc6eee5a8265ddc6dc5ea7"}, + {file = "watchfiles-0.21.0-cp312-none-win32.whl", hash = "sha256:9d09869f2c5a6f2d9df50ce3064b3391d3ecb6dced708ad64467b9e4f2c9bef3"}, + {file = "watchfiles-0.21.0-cp312-none-win_amd64.whl", hash = "sha256:18722b50783b5e30a18a8a5db3006bab146d2b705c92eb9a94f78c72beb94094"}, + {file = "watchfiles-0.21.0-cp312-none-win_arm64.whl", hash = "sha256:a3b9bec9579a15fb3ca2d9878deae789df72f2b0fdaf90ad49ee389cad5edab6"}, + {file = "watchfiles-0.21.0-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:4ea10a29aa5de67de02256a28d1bf53d21322295cb00bd2d57fcd19b850ebd99"}, + {file = "watchfiles-0.21.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:40bca549fdc929b470dd1dbfcb47b3295cb46a6d2c90e50588b0a1b3bd98f429"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9b37a7ba223b2f26122c148bb8d09a9ff312afca998c48c725ff5a0a632145f7"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec8c8900dc5c83650a63dd48c4d1d245343f904c4b64b48798c67a3767d7e165"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8ad3fe0a3567c2f0f629d800409cd528cb6251da12e81a1f765e5c5345fd0137"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:9d353c4cfda586db2a176ce42c88f2fc31ec25e50212650c89fdd0f560ee507b"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:83a696da8922314ff2aec02987eefb03784f473281d740bf9170181829133765"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a03651352fc20975ee2a707cd2d74a386cd303cc688f407296064ad1e6d1562"}, + {file = "watchfiles-0.21.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3ad692bc7792be8c32918c699638b660c0de078a6cbe464c46e1340dadb94c19"}, + {file = "watchfiles-0.21.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06247538e8253975bdb328e7683f8515ff5ff041f43be6c40bff62d989b7d0b0"}, + {file = "watchfiles-0.21.0-cp38-none-win32.whl", hash = "sha256:9a0aa47f94ea9a0b39dd30850b0adf2e1cd32a8b4f9c7aa443d852aacf9ca214"}, + {file = "watchfiles-0.21.0-cp38-none-win_amd64.whl", hash = "sha256:8d5f400326840934e3507701f9f7269247f7c026d1b6cfd49477d2be0933cfca"}, + {file = "watchfiles-0.21.0-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:7f762a1a85a12cc3484f77eee7be87b10f8c50b0b787bb02f4e357403cad0c0e"}, + {file = "watchfiles-0.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6e9be3ef84e2bb9710f3f777accce25556f4a71e15d2b73223788d528fcc2052"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4c48a10d17571d1275701e14a601e36959ffada3add8cdbc9e5061a6e3579a5d"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c889025f59884423428c261f212e04d438de865beda0b1e1babab85ef4c0f01"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:66fac0c238ab9a2e72d026b5fb91cb902c146202bbd29a9a1a44e8db7b710b6f"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4a21f71885aa2744719459951819e7bf5a906a6448a6b2bbce8e9cc9f2c8128"}, + {file = 
"watchfiles-0.21.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c9198c989f47898b2c22201756f73249de3748e0fc9de44adaf54a8b259cc0c"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f57c4461cd24fda22493109c45b3980863c58a25b8bec885ca8bea6b8d4b28"}, + {file = "watchfiles-0.21.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:853853cbf7bf9408b404754b92512ebe3e3a83587503d766d23e6bf83d092ee6"}, + {file = "watchfiles-0.21.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d5b1dc0e708fad9f92c296ab2f948af403bf201db8fb2eb4c8179db143732e49"}, + {file = "watchfiles-0.21.0-cp39-none-win32.whl", hash = "sha256:59137c0c6826bd56c710d1d2bda81553b5e6b7c84d5a676747d80caf0409ad94"}, + {file = "watchfiles-0.21.0-cp39-none-win_amd64.whl", hash = "sha256:6cb8fdc044909e2078c248986f2fc76f911f72b51ea4a4fbbf472e01d14faa58"}, + {file = "watchfiles-0.21.0-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ab03a90b305d2588e8352168e8c5a1520b721d2d367f31e9332c4235b30b8994"}, + {file = "watchfiles-0.21.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:927c589500f9f41e370b0125c12ac9e7d3a2fd166b89e9ee2828b3dda20bfe6f"}, + {file = "watchfiles-0.21.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bd467213195e76f838caf2c28cd65e58302d0254e636e7c0fca81efa4a2e62c"}, + {file = "watchfiles-0.21.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02b73130687bc3f6bb79d8a170959042eb56eb3a42df3671c79b428cd73f17cc"}, + {file = "watchfiles-0.21.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:08dca260e85ffae975448e344834d765983237ad6dc308231aa16e7933db763e"}, + {file = "watchfiles-0.21.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3ccceb50c611c433145502735e0370877cced72a6c70fd2410238bcbc7fe51d8"}, + {file = "watchfiles-0.21.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:57d430f5fb63fea141ab71ca9c064e80de3a20b427ca2febcbfcef70ff0ce895"}, + {file = "watchfiles-0.21.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dd5fad9b9c0dd89904bbdea978ce89a2b692a7ee8a0ce19b940e538c88a809c"}, + {file = "watchfiles-0.21.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:be6dd5d52b73018b21adc1c5d28ac0c68184a64769052dfeb0c5d9998e7f56a2"}, + {file = "watchfiles-0.21.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b3cab0e06143768499384a8a5efb9c4dc53e19382952859e4802f294214f36ec"}, + {file = "watchfiles-0.21.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c6ed10c2497e5fedadf61e465b3ca12a19f96004c15dcffe4bd442ebadc2d85"}, + {file = "watchfiles-0.21.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43babacef21c519bc6631c5fce2a61eccdfc011b4bcb9047255e9620732c8097"}, + {file = "watchfiles-0.21.0.tar.gz", hash = "sha256:c76c635fabf542bb78524905718c39f736a98e5ab25b23ec6d4abede1a85a6a3"}, +] + +[package.dependencies] +anyio = ">=3.0.0" + +[[package]] +name = "wcwidth" +version = "0.2.13" +description = "Measures the displayed width of unicode strings in a terminal" +optional = false +python-versions = "*" +files = [ + {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, + {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, +] + +[[package]] +name = "webdriver-manager" +version = "4.0.1" +description = "Library provides the way to automatically manage drivers for different browsers" +optional = false +python-versions = ">=3.7" +files = [ + {file = "webdriver_manager-4.0.1-py2.py3-none-any.whl", hash = "sha256:d7970052295bb9cda2c1a24cf0b872dd2c41ababcc78f7b6b8dc37a41e979a7e"}, + {file = "webdriver_manager-4.0.1.tar.gz", hash = 
"sha256:25ec177c6a2ce9c02fb8046f1b2732701a9418d6a977967bb065d840a3175d87"}, +] + +[package.dependencies] +packaging = "*" +python-dotenv = "*" +requests = "*" + +[[package]] +name = "websocket-client" +version = "1.7.0" +description = "WebSocket client for Python with low level API options" +optional = false +python-versions = ">=3.8" +files = [ + {file = "websocket-client-1.7.0.tar.gz", hash = "sha256:10e511ea3a8c744631d3bd77e61eb17ed09304c413ad42cf6ddfa4c7787e8fe6"}, + {file = "websocket_client-1.7.0-py3-none-any.whl", hash = "sha256:f4c3d22fec12a2461427a29957ff07d35098ee2d976d3ba244e688b8b4057588"}, +] + +[package.extras] +docs = ["Sphinx (>=6.0)", "sphinx-rtd-theme (>=1.1.0)"] +optional = ["python-socks", "wsaccel"] +test = ["websockets"] + +[[package]] +name = "websockets" +version = "12.0" +description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "websockets-12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d554236b2a2006e0ce16315c16eaa0d628dab009c33b63ea03f41c6107958374"}, + {file = "websockets-12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2d225bb6886591b1746b17c0573e29804619c8f755b5598d875bb4235ea639be"}, + {file = "websockets-12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eb809e816916a3b210bed3c82fb88eaf16e8afcf9c115ebb2bacede1797d2547"}, + {file = "websockets-12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c588f6abc13f78a67044c6b1273a99e1cf31038ad51815b3b016ce699f0d75c2"}, + {file = "websockets-12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5aa9348186d79a5f232115ed3fa9020eab66d6c3437d72f9d2c8ac0c6858c558"}, + {file = "websockets-12.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6350b14a40c95ddd53e775dbdbbbc59b124a5c8ecd6fbb09c2e52029f7a9f480"}, + {file = 
"websockets-12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:70ec754cc2a769bcd218ed8d7209055667b30860ffecb8633a834dde27d6307c"}, + {file = "websockets-12.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e96f5ed1b83a8ddb07909b45bd94833b0710f738115751cdaa9da1fb0cb66e8"}, + {file = "websockets-12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4d87be612cbef86f994178d5186add3d94e9f31cc3cb499a0482b866ec477603"}, + {file = "websockets-12.0-cp310-cp310-win32.whl", hash = "sha256:befe90632d66caaf72e8b2ed4d7f02b348913813c8b0a32fae1cc5fe3730902f"}, + {file = "websockets-12.0-cp310-cp310-win_amd64.whl", hash = "sha256:363f57ca8bc8576195d0540c648aa58ac18cf85b76ad5202b9f976918f4219cf"}, + {file = "websockets-12.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5d873c7de42dea355d73f170be0f23788cf3fa9f7bed718fd2830eefedce01b4"}, + {file = "websockets-12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3f61726cae9f65b872502ff3c1496abc93ffbe31b278455c418492016e2afc8f"}, + {file = "websockets-12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed2fcf7a07334c77fc8a230755c2209223a7cc44fc27597729b8ef5425aa61a3"}, + {file = "websockets-12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e332c210b14b57904869ca9f9bf4ca32f5427a03eeb625da9b616c85a3a506c"}, + {file = "websockets-12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5693ef74233122f8ebab026817b1b37fe25c411ecfca084b29bc7d6efc548f45"}, + {file = "websockets-12.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e9e7db18b4539a29cc5ad8c8b252738a30e2b13f033c2d6e9d0549b45841c04"}, + {file = "websockets-12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6e2df67b8014767d0f785baa98393725739287684b9f8d8a1001eb2839031447"}, + {file = "websockets-12.0-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:bea88d71630c5900690fcb03161ab18f8f244805c59e2e0dc4ffadae0a7ee0ca"}, + {file = "websockets-12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dff6cdf35e31d1315790149fee351f9e52978130cef6c87c4b6c9b3baf78bc53"}, + {file = "websockets-12.0-cp311-cp311-win32.whl", hash = "sha256:3e3aa8c468af01d70332a382350ee95f6986db479ce7af14d5e81ec52aa2b402"}, + {file = "websockets-12.0-cp311-cp311-win_amd64.whl", hash = "sha256:25eb766c8ad27da0f79420b2af4b85d29914ba0edf69f547cc4f06ca6f1d403b"}, + {file = "websockets-12.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0e6e2711d5a8e6e482cacb927a49a3d432345dfe7dea8ace7b5790df5932e4df"}, + {file = "websockets-12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dbcf72a37f0b3316e993e13ecf32f10c0e1259c28ffd0a85cee26e8549595fbc"}, + {file = "websockets-12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12743ab88ab2af1d17dd4acb4645677cb7063ef4db93abffbf164218a5d54c6b"}, + {file = "websockets-12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b645f491f3c48d3f8a00d1fce07445fab7347fec54a3e65f0725d730d5b99cb"}, + {file = "websockets-12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9893d1aa45a7f8b3bc4510f6ccf8db8c3b62120917af15e3de247f0780294b92"}, + {file = "websockets-12.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f38a7b376117ef7aff996e737583172bdf535932c9ca021746573bce40165ed"}, + {file = "websockets-12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f764ba54e33daf20e167915edc443b6f88956f37fb606449b4a5b10ba42235a5"}, + {file = "websockets-12.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1e4b3f8ea6a9cfa8be8484c9221ec0257508e3a1ec43c36acdefb2a9c3b00aa2"}, + {file = "websockets-12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9fdf06fd06c32205a07e47328ab49c40fc1407cdec801d698a7c41167ea45113"}, + {file = 
"websockets-12.0-cp312-cp312-win32.whl", hash = "sha256:baa386875b70cbd81798fa9f71be689c1bf484f65fd6fb08d051a0ee4e79924d"}, + {file = "websockets-12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ae0a5da8f35a5be197f328d4727dbcfafa53d1824fac3d96cdd3a642fe09394f"}, + {file = "websockets-12.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5f6ffe2c6598f7f7207eef9a1228b6f5c818f9f4d53ee920aacd35cec8110438"}, + {file = "websockets-12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9edf3fc590cc2ec20dc9d7a45108b5bbaf21c0d89f9fd3fd1685e223771dc0b2"}, + {file = "websockets-12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8572132c7be52632201a35f5e08348137f658e5ffd21f51f94572ca6c05ea81d"}, + {file = "websockets-12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:604428d1b87edbf02b233e2c207d7d528460fa978f9e391bd8aaf9c8311de137"}, + {file = "websockets-12.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a9d160fd080c6285e202327aba140fc9a0d910b09e423afff4ae5cbbf1c7205"}, + {file = "websockets-12.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87b4aafed34653e465eb77b7c93ef058516cb5acf3eb21e42f33928616172def"}, + {file = "websockets-12.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b2ee7288b85959797970114deae81ab41b731f19ebcd3bd499ae9ca0e3f1d2c8"}, + {file = "websockets-12.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7fa3d25e81bfe6a89718e9791128398a50dec6d57faf23770787ff441d851967"}, + {file = "websockets-12.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a571f035a47212288e3b3519944f6bf4ac7bc7553243e41eac50dd48552b6df7"}, + {file = "websockets-12.0-cp38-cp38-win32.whl", hash = "sha256:3c6cc1360c10c17463aadd29dd3af332d4a1adaa8796f6b0e9f9df1fdb0bad62"}, + {file = "websockets-12.0-cp38-cp38-win_amd64.whl", hash = "sha256:1bf386089178ea69d720f8db6199a0504a406209a0fc23e603b27b300fdd6892"}, + {file = 
"websockets-12.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ab3d732ad50a4fbd04a4490ef08acd0517b6ae6b77eb967251f4c263011a990d"}, + {file = "websockets-12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1d9697f3337a89691e3bd8dc56dea45a6f6d975f92e7d5f773bc715c15dde28"}, + {file = "websockets-12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1df2fbd2c8a98d38a66f5238484405b8d1d16f929bb7a33ed73e4801222a6f53"}, + {file = "websockets-12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23509452b3bc38e3a057382c2e941d5ac2e01e251acce7adc74011d7d8de434c"}, + {file = "websockets-12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e5fc14ec6ea568200ea4ef46545073da81900a2b67b3e666f04adf53ad452ec"}, + {file = "websockets-12.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46e71dbbd12850224243f5d2aeec90f0aaa0f2dde5aeeb8fc8df21e04d99eff9"}, + {file = "websockets-12.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b81f90dcc6c85a9b7f29873beb56c94c85d6f0dac2ea8b60d995bd18bf3e2aae"}, + {file = "websockets-12.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a02413bc474feda2849c59ed2dfb2cddb4cd3d2f03a2fedec51d6e959d9b608b"}, + {file = "websockets-12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bbe6013f9f791944ed31ca08b077e26249309639313fff132bfbf3ba105673b9"}, + {file = "websockets-12.0-cp39-cp39-win32.whl", hash = "sha256:cbe83a6bbdf207ff0541de01e11904827540aa069293696dd528a6640bd6a5f6"}, + {file = "websockets-12.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc4e7fa5414512b481a2483775a8e8be7803a35b30ca805afa4998a84f9fd9e8"}, + {file = "websockets-12.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:248d8e2446e13c1d4326e0a6a4e9629cb13a11195051a73acf414812700badbd"}, + {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f44069528d45a933997a6fef143030d8ca8042f0dfaad753e2906398290e2870"}, + {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4e37d36f0d19f0a4413d3e18c0d03d0c268ada2061868c1e6f5ab1a6d575077"}, + {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d829f975fc2e527a3ef2f9c8f25e553eb7bc779c6665e8e1d52aa22800bb38b"}, + {file = "websockets-12.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2c71bd45a777433dd9113847af751aae36e448bc6b8c361a566cb043eda6ec30"}, + {file = "websockets-12.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0bee75f400895aef54157b36ed6d3b308fcab62e5260703add87f44cee9c82a6"}, + {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:423fc1ed29f7512fceb727e2d2aecb952c46aa34895e9ed96071821309951123"}, + {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27a5e9964ef509016759f2ef3f2c1e13f403725a5e6a1775555994966a66e931"}, + {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3181df4583c4d3994d31fb235dc681d2aaad744fbdbf94c4802485ececdecf2"}, + {file = "websockets-12.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b067cb952ce8bf40115f6c19f478dc71c5e719b7fbaa511359795dfd9d1a6468"}, + {file = "websockets-12.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:00700340c6c7ab788f176d118775202aadea7602c5cc6be6ae127761c16d6b0b"}, + {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e469d01137942849cff40517c97a30a93ae79917752b34029f0ec72df6b46399"}, + {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:ffefa1374cd508d633646d51a8e9277763a9b78ae71324183693959cf94635a7"}, + {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba0cab91b3956dfa9f512147860783a1829a8d905ee218a9837c18f683239611"}, + {file = "websockets-12.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2cb388a5bfb56df4d9a406783b7f9dbefb888c09b71629351cc6b036e9259370"}, + {file = "websockets-12.0-py3-none-any.whl", hash = "sha256:dc284bbc8d7c78a6c69e0c7325ab46ee5e40bb4d50e494d8131a07ef47500e9e"}, + {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"}, +] + +[[package]] +name = "win32-setctime" +version = "1.1.0" +description = "A small Python utility to set file creation time on Windows" +optional = false +python-versions = ">=3.5" +files = [ + {file = "win32_setctime-1.1.0-py3-none-any.whl", hash = "sha256:231db239e959c2fe7eb1d7dc129f11172354f98361c4fa2d6d2d7e278baa8aad"}, + {file = "win32_setctime-1.1.0.tar.gz", hash = "sha256:15cf5750465118d6929ae4de4eb46e8edae9a5634350c01ba582df868e932cb2"}, +] + +[package.extras] +dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"] + +[[package]] +name = "wrapt" +version = "1.16.0" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, + {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, + {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, + {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, + {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, + {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, + {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = 
"sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, + {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, + {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, +] + +[[package]] +name = "wsproto" +version = "1.2.0" +description = "WebSockets state-machine based protocol implementation" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, + {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, +] + +[package.dependencies] +h11 = ">=0.9.0,<1" + +[[package]] +name = "yarl" +version = "1.9.4" +description = "Yet another URL library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"}, + {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"}, + {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98"}, + {file = "yarl-1.9.4-cp311-cp311-win32.whl", hash = 
"sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31"}, + {file = "yarl-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_s390x.whl", hash = 
"sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10"}, + {file = "yarl-1.9.4-cp312-cp312-win32.whl", hash = "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7"}, + {file = "yarl-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984"}, + {file = "yarl-1.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434"}, + {file = "yarl-1.9.4-cp37-cp37m-win32.whl", hash = "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749"}, + {file = "yarl-1.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"}, + {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"}, + {file = "yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"}, + {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"}, + {file = "yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"}, + {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"}, + {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + +[[package]] +name = "zipp" +version = "3.17.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"}, + {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "jaraco.functools", "jaraco.itertools", 
"more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] + +[extras] +benchmark = ["agbenchmark"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.10" +content-hash = "afa9674e032573e483e977e818766efe18ea4f52efa1ce6dfc71686772371b5b" diff --git a/autogpts/autogpt/prompt_settings.yaml b/autogpts/autogpt/prompt_settings.yaml new file mode 100644 index 000000000000..438183803ff8 --- /dev/null +++ b/autogpts/autogpt/prompt_settings.yaml @@ -0,0 +1,17 @@ +constraints: [ + 'Exclusively use the commands listed below.', + 'You can only act proactively, and are unable to start background jobs or set up webhooks for yourself. Take this into account when planning your actions.', + 'You are unable to interact with physical objects. If this is absolutely necessary to fulfill a task or objective or to complete a step, you must ask the user to do it for you. If the user refuses this, and there is no other way to achieve your goals, you must terminate to avoid wasting time and energy.' +] +resources: [ + 'Internet access for searches and information gathering.', + 'The ability to read and write files.', + 'You are a Large Language Model, trained on millions of pages of text, including a lot of factual knowledge. Make use of this factual knowledge to avoid unnecessary gathering of information.' +] +best_practices: [ + 'Continuously review and analyze your actions to ensure you are performing to the best of your abilities.', + 'Constructively self-criticize your big-picture behavior constantly.', + 'Reflect on past decisions and strategies to refine your approach.', + 'Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.', + 'Only make use of your information gathering abilities to find information that you don''t yet have knowledge of.' 
+] diff --git a/autogpts/autogpt/pyproject.toml b/autogpts/autogpt/pyproject.toml new file mode 100644 index 000000000000..404bffaaa5f4 --- /dev/null +++ b/autogpts/autogpt/pyproject.toml @@ -0,0 +1,163 @@ +[tool.poetry] +name = "agpt" +version = "0.5.0" +authors = [ + "Significant Gravitas ", +] +readme = "README.md" +description = "An open-source attempt to make GPT-4 autonomous" +homepage = "https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpts/autogpt" +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", +] +packages = [{ include = "autogpt" }] + + +[tool.poetry.scripts] +autogpt = "autogpt.app.cli:cli" +serve = "autogpt.app.cli:serve" + + +[tool.poetry.dependencies] +python = "^3.10" +auto-gpt-plugin-template = {git = "https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template", rev = "0.1.0"} +# autogpt-forge = { path = "../forge" } +autogpt-forge = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", rev = "ab05b7ae70754c063909", subdirectory = "autogpts/forge"} +beautifulsoup4 = "^4.12.2" +boto3 = "^1.33.6" +charset-normalizer = "^3.1.0" +click = "*" +colorama = "^0.4.6" +demjson3 = "^3.0.0" +distro = "^1.8.0" +docker = "*" +duckduckgo-search = "^5.0.0" +en-core-web-sm = {url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl"} +fastapi = "^0.109.1" +ftfy = "^6.1.1" +gitpython = "^3.1.32" +google-api-python-client = "*" +gTTS = "^2.3.1" +hypercorn = "^0.14.4" +inflection = "*" +jsonschema = "*" +numpy = "*" +openai = "^1.7.2" +orjson = "^3.8.10" +Pillow = "*" +pinecone-client = "^2.2.1" +playsound = "~1.2.2" +pydantic = "*" +pylatexenc = "*" +pypdf = "^3.1.0" +python-docx = "*" +python-dotenv = "^1.0.0" +pyyaml = "^6.0" +readability-lxml = "^0.8.1" +redis = "*" +requests = "*" +selenium = "^4.11.2" +sentry-sdk = "^1.40.4" +spacy = "^3.0.0" +tenacity = "^8.2.2" +tiktoken 
= "^0.5.0" +webdriver-manager = "*" + +# OpenAI and Generic plugins import +openapi-python-client = "^0.14.0" + +# Benchmarking +agbenchmark = { path = "../../benchmark", optional = true } +# agbenchmark = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", subdirectory = "benchmark", optional = true} +google-cloud-logging = "^3.8.0" +google-cloud-storage = "^2.13.0" +psycopg2-binary = "^2.9.9" + +[tool.poetry.extras] +benchmark = ["agbenchmark"] + +[tool.poetry.group.dev.dependencies] +black = "*" +boto3-stubs = {extras = ["s3"], version = "^1.33.6"} +flake8 = "*" +gitpython = "^3.1.32" +isort = "*" +mypy = "*" +pre-commit = "*" +types-beautifulsoup4 = "*" +types-colorama = "*" +types-Markdown = "*" +types-Pillow = "*" + +# Testing +asynctest = "*" +coverage = "*" +pytest = "*" +pytest-asyncio = "*" +pytest-benchmark = "*" +pytest-cov = "*" +pytest-integration = "*" +pytest-mock = "*" +pytest-recording = "*" +pytest-xdist = "*" +vcrpy = {git = "https://github.com/Significant-Gravitas/vcrpy.git", rev = "master"} + + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + + +[tool.black] +line-length = 88 +target-version = ['py310'] +include = '\.pyi?$' +packages = ["autogpt"] +extend-exclude = '.+/(dist|.venv|venv|build|data)/.+' + + +[tool.isort] +profile = "black" +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true +ensure_newline_before_comments = true +line_length = 88 +sections = [ + "FUTURE", + "STDLIB", + "THIRDPARTY", + "FIRSTPARTY", + "LOCALFOLDER" +] +extend_skip = [ + "agbenchmark_config/temp_folder/", + "data/", +] + + +[tool.mypy] +follow_imports = 'skip' +check_untyped_defs = true +disallow_untyped_calls = true +files = [ + 'autogpt/**/*.py', + 'tests/**/*.py' +] + +[[tool.mypy.overrides]] +module = [ + 'requests.*', + 'yaml.*' +] +ignore_missing_imports = true + + +[tool.pytest.ini_options] +markers = [ + "requires_openai_api_key", + 
"requires_huggingface_api_key" +] diff --git a/autogpts/autogpt/run b/autogpts/autogpt/run new file mode 100644 index 000000000000..eebf7fe0f97f --- /dev/null +++ b/autogpts/autogpt/run @@ -0,0 +1,10 @@ +#!/bin/sh + +kill $(lsof -t -i :8000) + +if [ ! -f .env ] && [ -z "$OPENAI_API_KEY" ]; then + cp .env.example .env + echo "Please add your api keys to the .env file." >&2 + # exit 1 +fi +poetry run serve --debug diff --git a/autogpts/autogpt/run_benchmark b/autogpts/autogpt/run_benchmark new file mode 100644 index 000000000000..7264079412b2 --- /dev/null +++ b/autogpts/autogpt/run_benchmark @@ -0,0 +1,9 @@ +#!/bin/sh + +# Kill processes using port 8080 if any. +if lsof -t -i :8080; then + kill $(lsof -t -i :8080) +fi +# This is the cli entry point for the benchmarking tool. +# To run this in server mode pass in `serve` as the first argument. +poetry run agbenchmark "$@" diff --git a/autogpts/autogpt/scripts/__init__.py b/autogpts/autogpt/scripts/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/scripts/check_requirements.py b/autogpts/autogpt/scripts/check_requirements.py new file mode 100644 index 000000000000..c62520825cdf --- /dev/null +++ b/autogpts/autogpt/scripts/check_requirements.py @@ -0,0 +1,38 @@ +import contextlib +import os +import sys +from importlib.metadata import version + +try: + import poetry.factory # noqa +except ModuleNotFoundError: + os.system(f"{sys.executable} -m pip install 'poetry>=1.6.1,<2.0.0'") + +from poetry.core.constraints.version.version import Version +from poetry.factory import Factory + + +def main(): + poetry_project = Factory().create_poetry() + dependency_group = poetry_project.package.dependency_group("main") + + missing_packages = [] + for dep in dependency_group.dependencies: + if dep.is_optional(): + continue + # Try to verify that the installed version is suitable + with contextlib.suppress(ModuleNotFoundError): + installed_version = version(dep.name) # if this fails -> not 
installed + if dep.constraint.allows(Version.parse(installed_version)): + continue + # If the above verification fails, mark the package as missing + missing_packages.append(str(dep)) + + if missing_packages: + print("Missing packages:") + print(", ".join(missing_packages)) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/autogpts/autogpt/scripts/install_plugin_deps.py b/autogpts/autogpt/scripts/install_plugin_deps.py new file mode 100644 index 000000000000..02cbfc0be920 --- /dev/null +++ b/autogpts/autogpt/scripts/install_plugin_deps.py @@ -0,0 +1,66 @@ +import logging +import os +import subprocess +import sys +import zipfile +from glob import glob +from pathlib import Path + +logger = logging.getLogger(__name__) + + +def install_plugin_dependencies(): + """ + Installs dependencies for all plugins in the plugins dir. + + Args: + None + + Returns: + None + """ + plugins_dir = Path(os.getenv("PLUGINS_DIR", "plugins")) + + logger.debug("Checking for dependencies in zipped plugins...") + + # Install zip-based plugins + for plugin_archive in plugins_dir.glob("*.zip"): + logger.debug(f"Checking for requirements in '{plugin_archive}'...") + with zipfile.ZipFile(str(plugin_archive), "r") as zfile: + if not zfile.namelist(): + continue + + # Assume the first entry in the list will be (in) the lowest common dir + first_entry = zfile.namelist()[0] + basedir = first_entry.rsplit("/", 1)[0] if "/" in first_entry else "" + logger.debug(f"Looking for requirements.txt in '{basedir}'") + + basereqs = os.path.join(basedir, "requirements.txt") + try: + extracted = zfile.extract(basereqs, path=plugins_dir) + except KeyError as e: + logger.debug(e.args[0]) + continue + + logger.debug(f"Installing dependencies from '{basereqs}'...") + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "-r", extracted] + ) + os.remove(extracted) + os.rmdir(os.path.join(plugins_dir, basedir)) + + logger.debug("Checking for dependencies in other plugin folders...") + + # 
Install directory-based plugins + for requirements_file in glob(f"{plugins_dir}/*/requirements.txt"): + logger.debug(f"Installing dependencies from '{requirements_file}'...") + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "-r", requirements_file], + stdout=subprocess.DEVNULL, + ) + + logger.debug("Finished installing plugin dependencies") + + +if __name__ == "__main__": + install_plugin_dependencies() diff --git a/autogpts/autogpt/setup b/autogpts/autogpt/setup new file mode 100644 index 000000000000..8263dce32071 --- /dev/null +++ b/autogpts/autogpt/setup @@ -0,0 +1,8 @@ +#!/bin/sh + +# Necessary to prevent forge and agbenchmark from breaking each others' install: +# https://github.com/python-poetry/poetry/issues/6958 +POETRY_INSTALLER_PARALLEL=false \ +poetry install --no-interaction --extras benchmark + +echo "Setup completed successfully." diff --git a/autogpts/autogpt/tests/__init__.py b/autogpts/autogpt/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/tests/conftest.py b/autogpts/autogpt/tests/conftest.py new file mode 100644 index 000000000000..6f796c2b508b --- /dev/null +++ b/autogpts/autogpt/tests/conftest.py @@ -0,0 +1,147 @@ +from __future__ import annotations + +import os +import uuid +from pathlib import Path +from tempfile import TemporaryDirectory + +import pytest +import yaml +from pytest_mock import MockerFixture + +from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings +from autogpt.app.main import _configure_openai_provider +from autogpt.config import AIProfile, Config, ConfigBuilder +from autogpt.core.resource.model_providers import ChatModelProvider, OpenAIProvider +from autogpt.file_storage.local import ( + FileStorage, + FileStorageConfiguration, + LocalFileStorage, +) +from autogpt.logs.config import configure_logging +from autogpt.models.command_registry import CommandRegistry + +pytest_plugins = [ + "tests.integration.agent_factory", + 
"tests.integration.memory.utils", + "tests.vcr", +] + + +@pytest.fixture() +def tmp_project_root(tmp_path: Path) -> Path: + return tmp_path + + +@pytest.fixture() +def app_data_dir(tmp_project_root: Path) -> Path: + dir = tmp_project_root / "data" + dir.mkdir(parents=True, exist_ok=True) + return dir + + +@pytest.fixture() +def storage(app_data_dir: Path) -> FileStorage: + storage = LocalFileStorage( + FileStorageConfiguration(root=app_data_dir, restrict_to_root=False) + ) + storage.initialize() + return storage + + +@pytest.fixture +def temp_plugins_config_file(): + """ + Create a plugins_config.yaml file in a temp directory + so that it doesn't mess with existing ones. + """ + config_directory = TemporaryDirectory() + config_file = Path(config_directory.name) / "plugins_config.yaml" + with open(config_file, "w+") as f: + f.write(yaml.dump({})) + + yield config_file + + +@pytest.fixture(scope="function") +def config( + temp_plugins_config_file: Path, + tmp_project_root: Path, + app_data_dir: Path, + mocker: MockerFixture, +): + if not os.environ.get("OPENAI_API_KEY"): + os.environ["OPENAI_API_KEY"] = "sk-dummy" + config = ConfigBuilder.build_config_from_env(project_root=tmp_project_root) + + config.app_data_dir = app_data_dir + + config.plugins_dir = "tests/unit/data/test_plugins" + config.plugins_config_file = temp_plugins_config_file + + config.noninteractive_mode = True + + # avoid circular dependency + from autogpt.plugins.plugins_config import PluginsConfig + + config.plugins_config = PluginsConfig.load_config( + plugins_config_file=config.plugins_config_file, + plugins_denylist=config.plugins_denylist, + plugins_allowlist=config.plugins_allowlist, + ) + yield config + + +@pytest.fixture(scope="session") +def setup_logger(config: Config): + configure_logging( + debug=True, + log_dir=Path(__file__).parent / "logs", + plain_console_output=True, + ) + + +@pytest.fixture +def llm_provider(config: Config) -> OpenAIProvider: + return 
_configure_openai_provider(config) + + +@pytest.fixture +def agent( + config: Config, llm_provider: ChatModelProvider, storage: FileStorage +) -> Agent: + ai_profile = AIProfile( + ai_name="Base", + ai_role="A base AI", + ai_goals=[], + ) + + command_registry = CommandRegistry() + + agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True) + agent_prompt_config.use_functions_api = config.openai_functions + + agent_settings = AgentSettings( + name=Agent.default_settings.name, + description=Agent.default_settings.description, + agent_id=f"AutoGPT-test-agent-{str(uuid.uuid4())[:8]}", + ai_profile=ai_profile, + config=AgentConfiguration( + fast_llm=config.fast_llm, + smart_llm=config.smart_llm, + allow_fs_access=not config.restrict_to_workspace, + use_functions_api=config.openai_functions, + plugins=config.plugins, + ), + prompt_config=agent_prompt_config, + history=Agent.default_settings.history.copy(deep=True), + ) + + agent = Agent( + settings=agent_settings, + llm_provider=llm_provider, + command_registry=command_registry, + file_storage=storage, + legacy_config=config, + ) + return agent diff --git a/autogpts/autogpt/tests/context.py b/autogpts/autogpt/tests/context.py new file mode 100644 index 000000000000..0d531468db14 --- /dev/null +++ b/autogpts/autogpt/tests/context.py @@ -0,0 +1,7 @@ +import os +import sys + +# Add the scripts directory to the path so that we can import the browse module. 
+sys.path.insert( + 0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../scripts")) +) diff --git a/autogpts/autogpt/tests/integration/__init__.py b/autogpts/autogpt/tests/integration/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/tests/integration/agent_factory.py b/autogpts/autogpt/tests/integration/agent_factory.py new file mode 100644 index 000000000000..dfff73b931a7 --- /dev/null +++ b/autogpts/autogpt/tests/integration/agent_factory.py @@ -0,0 +1,56 @@ +import pytest + +from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings +from autogpt.config import AIProfile, Config +from autogpt.memory.vector import get_memory +from autogpt.models.command_registry import CommandRegistry + + +@pytest.fixture +def memory_json_file(config: Config): + was_memory_backend = config.memory_backend + + config.memory_backend = "json_file" + memory = get_memory(config) + memory.clear() + yield memory + + config.memory_backend = was_memory_backend + + +@pytest.fixture +def dummy_agent(config: Config, llm_provider, memory_json_file): + command_registry = CommandRegistry() + + ai_profile = AIProfile( + ai_name="Dummy Agent", + ai_role="Dummy Role", + ai_goals=[ + "Dummy Task", + ], + ) + + agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True) + agent_prompt_config.use_functions_api = config.openai_functions + agent_settings = AgentSettings( + name=Agent.default_settings.name, + description=Agent.default_settings.description, + ai_profile=ai_profile, + config=AgentConfiguration( + fast_llm=config.fast_llm, + smart_llm=config.smart_llm, + use_functions_api=config.openai_functions, + plugins=config.plugins, + ), + prompt_config=agent_prompt_config, + history=Agent.default_settings.history.copy(deep=True), + ) + + agent = Agent( + settings=agent_settings, + llm_provider=llm_provider, + command_registry=command_registry, + legacy_config=config, + ) + + return agent diff --git 
a/autogpts/autogpt/tests/integration/memory/__init__.py b/autogpts/autogpt/tests/integration/memory/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/tests/integration/memory/_test_json_file_memory.py b/autogpts/autogpt/tests/integration/memory/_test_json_file_memory.py new file mode 100644 index 000000000000..94bf0d1bdee8 --- /dev/null +++ b/autogpts/autogpt/tests/integration/memory/_test_json_file_memory.py @@ -0,0 +1,126 @@ +# sourcery skip: snake-case-functions +"""Tests for JSONFileMemory class""" +import orjson +import pytest + +from autogpt.config import Config +from autogpt.file_storage import FileStorage +from autogpt.memory.vector import JSONFileMemory, MemoryItem + + +def test_json_memory_init_without_backing_file(config: Config, storage: FileStorage): + index_file = storage.root / f"{config.memory_index}.json" + + assert not index_file.exists() + JSONFileMemory(config) + assert index_file.exists() + assert index_file.read_text() == "[]" + + +def test_json_memory_init_with_backing_empty_file(config: Config, storage: FileStorage): + index_file = storage.root / f"{config.memory_index}.json" + index_file.touch() + + assert index_file.exists() + JSONFileMemory(config) + assert index_file.exists() + assert index_file.read_text() == "[]" + + +def test_json_memory_init_with_backing_invalid_file( + config: Config, storage: FileStorage +): + index_file = storage.root / f"{config.memory_index}.json" + index_file.touch() + + raw_data = {"texts": ["test"]} + data = orjson.dumps(raw_data, option=JSONFileMemory.SAVE_OPTIONS) + with index_file.open("wb") as f: + f.write(data) + + assert index_file.exists() + JSONFileMemory(config) + assert index_file.exists() + assert index_file.read_text() == "[]" + + +def test_json_memory_add(config: Config, memory_item: MemoryItem): + index = JSONFileMemory(config) + index.add(memory_item) + assert index.memories[0] == memory_item + + +def test_json_memory_clear(config: Config, 
memory_item: MemoryItem): + index = JSONFileMemory(config) + assert index.memories == [] + + index.add(memory_item) + assert index.memories[0] == memory_item, "Cannot test clear() because add() fails" + + index.clear() + assert index.memories == [] + + +def test_json_memory_get(config: Config, memory_item: MemoryItem, mock_get_embedding): + index = JSONFileMemory(config) + assert ( + index.get("test", config) is None + ), "Cannot test get() because initial index is not empty" + + index.add(memory_item) + retrieved = index.get("test", config) + assert retrieved is not None + assert retrieved.memory_item == memory_item + + +def test_json_memory_load_index(config: Config, memory_item: MemoryItem): + index = JSONFileMemory(config) + index.add(memory_item) + + try: + assert index.file_path.exists(), "index was not saved to file" + assert len(index) == 1, f"index contains {len(index)} items instead of 1" + assert index.memories[0] == memory_item, "item in index != added mock item" + except AssertionError as e: + raise ValueError(f"Setting up for load_index test failed: {e}") + + index.memories = [] + index.load_index() + + assert len(index) == 1 + assert index.memories[0] == memory_item + + +@pytest.mark.vcr +@pytest.mark.requires_openai_api_key +def test_json_memory_get_relevant(config: Config, cached_openai_client: None) -> None: + index = JSONFileMemory(config) + mem1 = MemoryItem.from_text_file("Sample text", "sample.txt", config) + mem2 = MemoryItem.from_text_file( + "Grocery list:\n- Pancake mix", "groceries.txt", config + ) + mem3 = MemoryItem.from_text_file( + "What is your favorite color?", "color.txt", config + ) + lipsum = "Lorem ipsum dolor sit amet" + mem4 = MemoryItem.from_text_file(" ".join([lipsum] * 100), "lipsum.txt", config) + index.add(mem1) + index.add(mem2) + index.add(mem3) + index.add(mem4) + + assert index.get_relevant(mem1.raw_content, 1, config)[0].memory_item == mem1 + assert index.get_relevant(mem2.raw_content, 1, config)[0].memory_item == 
mem2 + assert index.get_relevant(mem3.raw_content, 1, config)[0].memory_item == mem3 + assert [mr.memory_item for mr in index.get_relevant(lipsum, 2, config)] == [ + mem4, + mem1, + ] + + +def test_json_memory_get_stats(config: Config, memory_item: MemoryItem) -> None: + index = JSONFileMemory(config) + index.add(memory_item) + n_memories, n_chunks = index.get_stats() + assert n_memories == 1 + assert n_chunks == 1 diff --git a/autogpts/autogpt/tests/integration/memory/conftest.py b/autogpts/autogpt/tests/integration/memory/conftest.py new file mode 100644 index 000000000000..64ac651deed2 --- /dev/null +++ b/autogpts/autogpt/tests/integration/memory/conftest.py @@ -0,0 +1,17 @@ +import pytest + +from autogpt.memory.vector.memory_item import MemoryItem +from autogpt.memory.vector.utils import Embedding + + +@pytest.fixture +def memory_item(mock_embedding: Embedding): + return MemoryItem( + raw_content="test content", + summary="test content summary", + chunks=["test content"], + chunk_summaries=["test content summary"], + e_summary=mock_embedding, + e_chunks=[mock_embedding], + metadata={}, + ) diff --git a/autogpts/autogpt/tests/integration/memory/utils.py b/autogpts/autogpt/tests/integration/memory/utils.py new file mode 100644 index 000000000000..aea12832fc2b --- /dev/null +++ b/autogpts/autogpt/tests/integration/memory/utils.py @@ -0,0 +1,44 @@ +import numpy +import pytest +from pytest_mock import MockerFixture + +import autogpt.memory.vector.memory_item as vector_memory_item +import autogpt.memory.vector.providers.base as memory_provider_base +from autogpt.config.config import Config +from autogpt.core.resource.model_providers import OPEN_AI_EMBEDDING_MODELS +from autogpt.memory.vector import get_memory +from autogpt.memory.vector.utils import Embedding + + +@pytest.fixture +def embedding_dimension(config: Config): + return OPEN_AI_EMBEDDING_MODELS[config.embedding_model].embedding_dimensions + + +@pytest.fixture +def mock_embedding(embedding_dimension: int) -> 
Embedding: + return numpy.full((1, embedding_dimension), 0.0255, numpy.float32)[0] + + +@pytest.fixture +def mock_get_embedding(mocker: MockerFixture, mock_embedding: Embedding): + mocker.patch.object( + vector_memory_item, + "get_embedding", + return_value=mock_embedding, + ) + mocker.patch.object( + memory_provider_base, + "get_embedding", + return_value=mock_embedding, + ) + + +@pytest.fixture +def memory_none(agent_test_config: Config, mock_get_embedding): + was_memory_backend = agent_test_config.memory_backend + + agent_test_config.memory_backend = "no_memory" + yield get_memory(agent_test_config) + + agent_test_config.memory_backend = was_memory_backend diff --git a/autogpts/autogpt/tests/integration/test_execute_code.py b/autogpts/autogpt/tests/integration/test_execute_code.py new file mode 100644 index 000000000000..b8667b475b72 --- /dev/null +++ b/autogpts/autogpt/tests/integration/test_execute_code.py @@ -0,0 +1,127 @@ +import random +import string +import tempfile +from pathlib import Path + +import pytest + +import autogpt.commands.execute_code as sut # system under testing +from autogpt.agents.agent import Agent +from autogpt.agents.utils.exceptions import ( + InvalidArgumentError, + OperationNotAllowedError, +) + + +@pytest.fixture +def random_code(random_string) -> str: + return f"print('Hello {random_string}!')" + + +@pytest.fixture +def python_test_file(agent: Agent, random_code: str): + temp_file = tempfile.NamedTemporaryFile(dir=agent.workspace.root, suffix=".py") + temp_file.write(str.encode(random_code)) + temp_file.flush() + + yield Path(temp_file.name) + temp_file.close() + + +@pytest.fixture +def python_test_args_file(agent: Agent): + temp_file = tempfile.NamedTemporaryFile(dir=agent.workspace.root, suffix=".py") + temp_file.write(str.encode("import sys\nprint(sys.argv[1], sys.argv[2])")) + temp_file.flush() + + yield Path(temp_file.name) + temp_file.close() + + +@pytest.fixture +def random_string(): + return 
"".join(random.choice(string.ascii_lowercase) for _ in range(10)) + + +def test_execute_python_file(python_test_file: Path, random_string: str, agent: Agent): + if not (sut.is_docker_available() or sut.we_are_running_in_a_docker_container()): + pytest.skip("Docker is not available") + + result: str = sut.execute_python_file(python_test_file, agent=agent) + assert result.replace("\r", "") == f"Hello {random_string}!\n" + + +def test_execute_python_file_args( + python_test_args_file: Path, random_string: str, agent: Agent +): + if not (sut.is_docker_available() or sut.we_are_running_in_a_docker_container()): + pytest.skip("Docker is not available") + + random_args = [random_string] * 2 + random_args_string = " ".join(random_args) + result = sut.execute_python_file( + python_test_args_file, args=random_args, agent=agent + ) + assert result == f"{random_args_string}\n" + + +def test_execute_python_code(random_code: str, random_string: str, agent: Agent): + if not (sut.is_docker_available() or sut.we_are_running_in_a_docker_container()): + pytest.skip("Docker is not available") + + result: str = sut.execute_python_code(random_code, agent=agent) + assert result.replace("\r", "") == f"Hello {random_string}!\n" + + +def test_execute_python_file_invalid(agent: Agent): + with pytest.raises(InvalidArgumentError): + sut.execute_python_file(Path("not_python.txt"), agent) + + +def test_execute_python_file_not_found(agent: Agent): + with pytest.raises( + FileNotFoundError, + match=r"python: can't open file '([a-zA-Z]:)?[/\\\-\w]*notexist.py': " + r"\[Errno 2\] No such file or directory", + ): + sut.execute_python_file(Path("notexist.py"), agent) + + +def test_execute_shell(random_string: str, agent: Agent): + result = sut.execute_shell(f"echo 'Hello {random_string}!'", agent) + assert f"Hello {random_string}!" 
in result + + +def test_execute_shell_local_commands_not_allowed(random_string: str, agent: Agent): + result = sut.execute_shell(f"echo 'Hello {random_string}!'", agent) + assert f"Hello {random_string}!" in result + + +def test_execute_shell_denylist_should_deny(agent: Agent, random_string: str): + agent.legacy_config.shell_denylist = ["echo"] + + with pytest.raises(OperationNotAllowedError, match="not allowed"): + sut.execute_shell(f"echo 'Hello {random_string}!'", agent) + + +def test_execute_shell_denylist_should_allow(agent: Agent, random_string: str): + agent.legacy_config.shell_denylist = ["cat"] + + result = sut.execute_shell(f"echo 'Hello {random_string}!'", agent) + assert "Hello" in result and random_string in result + + +def test_execute_shell_allowlist_should_deny(agent: Agent, random_string: str): + agent.legacy_config.shell_command_control = sut.ALLOWLIST_CONTROL + agent.legacy_config.shell_allowlist = ["cat"] + + with pytest.raises(OperationNotAllowedError, match="not allowed"): + sut.execute_shell(f"echo 'Hello {random_string}!'", agent) + + +def test_execute_shell_allowlist_should_allow(agent: Agent, random_string: str): + agent.legacy_config.shell_command_control = sut.ALLOWLIST_CONTROL + agent.legacy_config.shell_allowlist = ["echo"] + + result = sut.execute_shell(f"echo 'Hello {random_string}!'", agent) + assert "Hello" in result and random_string in result diff --git a/autogpts/autogpt/tests/integration/test_image_gen.py b/autogpts/autogpt/tests/integration/test_image_gen.py new file mode 100644 index 000000000000..e5c1c555ed89 --- /dev/null +++ b/autogpts/autogpt/tests/integration/test_image_gen.py @@ -0,0 +1,235 @@ +import functools +import hashlib +from pathlib import Path +from unittest.mock import patch + +import pytest +from PIL import Image + +from autogpt.agents.agent import Agent +from autogpt.commands.image_gen import generate_image, generate_image_with_sd_webui + + +@pytest.fixture(params=[256, 512, 1024]) +def image_size(request): 
+ """Parametrize image size.""" + return request.param + + +@pytest.mark.requires_openai_api_key +@pytest.mark.vcr +def test_dalle(agent: Agent, storage, image_size, cached_openai_client): + """Test DALL-E image generation.""" + generate_and_validate( + agent, + storage, + image_provider="dalle", + image_size=image_size, + ) + + +@pytest.mark.xfail( + reason="The image is too big to be put in a cassette for a CI pipeline. " + "We're looking into a solution." +) +@pytest.mark.requires_huggingface_api_key +@pytest.mark.parametrize( + "image_model", + ["CompVis/stable-diffusion-v1-4", "stabilityai/stable-diffusion-2-1"], +) +def test_huggingface(agent: Agent, storage, image_size, image_model): + """Test HuggingFace image generation.""" + generate_and_validate( + agent, + storage, + image_provider="huggingface", + image_size=image_size, + hugging_face_image_model=image_model, + ) + + +@pytest.mark.xfail(reason="SD WebUI call does not work.") +def test_sd_webui(agent: Agent, storage, image_size): + """Test SD WebUI image generation.""" + generate_and_validate( + agent, + storage, + image_provider="sd_webui", + image_size=image_size, + ) + + +@pytest.mark.xfail(reason="SD WebUI call does not work.") +def test_sd_webui_negative_prompt(agent: Agent, storage, image_size): + gen_image = functools.partial( + generate_image_with_sd_webui, + prompt="astronaut riding a horse", + agent=agent, + size=image_size, + extra={"seed": 123}, + ) + + # Generate an image with a negative prompt + image_path = lst( + gen_image(negative_prompt="horse", output_file=Path("negative.jpg")) + ) + with Image.open(image_path) as img: + neg_image_hash = hashlib.md5(img.tobytes()).hexdigest() + + # Generate an image without a negative prompt + image_path = lst(gen_image(output_file=Path("positive.jpg"))) + with Image.open(image_path) as img: + image_hash = hashlib.md5(img.tobytes()).hexdigest() + + assert image_hash != neg_image_hash + + +def lst(txt): + """Extract the file path from the output of 
`generate_image()`""" + return Path(txt.split(": ", maxsplit=1)[1].strip()) + + +def generate_and_validate( + agent: Agent, + storage, + image_size, + image_provider, + hugging_face_image_model=None, + **kwargs, +): + """Generate an image and validate the output.""" + agent.legacy_config.image_provider = image_provider + if hugging_face_image_model: + agent.legacy_config.huggingface_image_model = hugging_face_image_model + prompt = "astronaut riding a horse" + + image_path = lst(generate_image(prompt, agent, image_size, **kwargs)) + assert image_path.exists() + with Image.open(image_path) as img: + assert img.size == (image_size, image_size) + + +@pytest.mark.parametrize( + "return_text", + [ + # Delay + '{"error":"Model [model] is currently loading","estimated_time": [delay]}', + '{"error":"Model [model] is currently loading"}', # No delay + '{"error:}', # Bad JSON + "", # Bad Image + ], +) +@pytest.mark.parametrize( + "image_model", + ["CompVis/stable-diffusion-v1-4", "stabilityai/stable-diffusion-2-1"], +) +@pytest.mark.parametrize("delay", [10, 0]) +def test_huggingface_fail_request_with_delay( + agent: Agent, storage, image_size, image_model, return_text, delay +): + return_text = return_text.replace("[model]", image_model).replace( + "[delay]", str(delay) + ) + + with patch("requests.post") as mock_post: + if return_text == "": + # Test bad image + mock_post.return_value.status_code = 200 + mock_post.return_value.ok = True + mock_post.return_value.content = b"bad image" + else: + # Test delay and bad json + mock_post.return_value.status_code = 500 + mock_post.return_value.ok = False + mock_post.return_value.text = return_text + + agent.legacy_config.image_provider = "huggingface" + agent.legacy_config.huggingface_api_token = "mock-api-key" + agent.legacy_config.huggingface_image_model = image_model + prompt = "astronaut riding a horse" + + with patch("time.sleep") as mock_sleep: + # Verify request fails. 
+ result = generate_image(prompt, agent, image_size) + assert result == "Error creating image." + + # Verify retry was called with delay if delay is in return_text + if "estimated_time" in return_text: + mock_sleep.assert_called_with(delay) + else: + mock_sleep.assert_not_called() + + +def test_huggingface_fail_request_no_delay(mocker, agent: Agent): + agent.legacy_config.huggingface_api_token = "1" + + # Mock requests.post + mock_post = mocker.patch("requests.post") + mock_post.return_value.status_code = 500 + mock_post.return_value.ok = False + mock_post.return_value.text = ( + '{"error":"Model CompVis/stable-diffusion-v1-4 is currently loading"}' + ) + + # Mock time.sleep + mock_sleep = mocker.patch("time.sleep") + + agent.legacy_config.image_provider = "huggingface" + agent.legacy_config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" + + result = generate_image("astronaut riding a horse", agent, 512) + + assert result == "Error creating image." + + # Verify retry was not called. + mock_sleep.assert_not_called() + + +def test_huggingface_fail_request_bad_json(mocker, agent: Agent): + agent.legacy_config.huggingface_api_token = "1" + + # Mock requests.post + mock_post = mocker.patch("requests.post") + mock_post.return_value.status_code = 500 + mock_post.return_value.ok = False + mock_post.return_value.text = '{"error:}' + + # Mock time.sleep + mock_sleep = mocker.patch("time.sleep") + + agent.legacy_config.image_provider = "huggingface" + agent.legacy_config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" + + result = generate_image("astronaut riding a horse", agent, 512) + + assert result == "Error creating image." + + # Verify retry was not called. 
+ mock_sleep.assert_not_called() + + +def test_huggingface_fail_request_bad_image(mocker, agent: Agent): + agent.legacy_config.huggingface_api_token = "1" + + # Mock requests.post + mock_post = mocker.patch("requests.post") + mock_post.return_value.status_code = 200 + + agent.legacy_config.image_provider = "huggingface" + agent.legacy_config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" + + result = generate_image("astronaut riding a horse", agent, 512) + + assert result == "Error creating image." + + +def test_huggingface_fail_missing_api_token(mocker, agent: Agent): + agent.legacy_config.image_provider = "huggingface" + agent.legacy_config.huggingface_image_model = "CompVis/stable-diffusion-v1-4" + + # Mock requests.post to raise ValueError + mocker.patch("requests.post", side_effect=ValueError) + + # Verify request raises an error. + with pytest.raises(ValueError): + generate_image("astronaut riding a horse", agent, 512) diff --git a/autogpts/autogpt/tests/integration/test_setup.py b/autogpts/autogpt/tests/integration/test_setup.py new file mode 100644 index 000000000000..3c66e257f2d7 --- /dev/null +++ b/autogpts/autogpt/tests/integration/test_setup.py @@ -0,0 +1,70 @@ +from unittest.mock import patch + +import pytest + +from autogpt.app.setup import ( + apply_overrides_to_ai_settings, + interactively_revise_ai_settings, +) +from autogpt.config import AIDirectives, Config +from autogpt.config.ai_profile import AIProfile + + +@pytest.mark.asyncio +async def test_apply_overrides_to_ai_settings(): + ai_profile = AIProfile(ai_name="Test AI", ai_role="Test Role") + directives = AIDirectives( + resources=["Resource1"], + constraints=["Constraint1"], + best_practices=["BestPractice1"], + ) + + apply_overrides_to_ai_settings( + ai_profile, + directives, + override_name="New AI", + override_role="New Role", + replace_directives=True, + resources=["NewResource"], + constraints=["NewConstraint"], + best_practices=["NewBestPractice"], + ) + + assert 
ai_profile.ai_name == "New AI" + assert ai_profile.ai_role == "New Role" + assert directives.resources == ["NewResource"] + assert directives.constraints == ["NewConstraint"] + assert directives.best_practices == ["NewBestPractice"] + + +@pytest.mark.asyncio +async def test_interactively_revise_ai_settings(config: Config): + ai_profile = AIProfile(ai_name="Test AI", ai_role="Test Role") + directives = AIDirectives( + resources=["Resource1"], + constraints=["Constraint1"], + best_practices=["BestPractice1"], + ) + + user_inputs = [ + "n", + "New AI", + "New Role", + "NewConstraint", + "", + "NewResource", + "", + "NewBestPractice", + "", + "y", + ] + with patch("autogpt.app.setup.clean_input", side_effect=user_inputs): + ai_profile, directives = await interactively_revise_ai_settings( + ai_profile, directives, config + ) + + assert ai_profile.ai_name == "New AI" + assert ai_profile.ai_role == "New Role" + assert directives.resources == ["NewResource"] + assert directives.constraints == ["NewConstraint"] + assert directives.best_practices == ["NewBestPractice"] diff --git a/autogpts/autogpt/tests/integration/test_web_selenium.py b/autogpts/autogpt/tests/integration/test_web_selenium.py new file mode 100644 index 000000000000..1d904b855f76 --- /dev/null +++ b/autogpts/autogpt/tests/integration/test_web_selenium.py @@ -0,0 +1,18 @@ +import pytest + +from autogpt.agents.agent import Agent +from autogpt.commands.web_selenium import BrowsingError, read_webpage + + +@pytest.mark.vcr +@pytest.mark.requires_openai_api_key +@pytest.mark.asyncio +async def test_browse_website_nonexistent_url(agent: Agent, cached_openai_client: None): + url = "https://auto-gpt-thinks-this-website-does-not-exist.com" + question = "How to execute a barrel roll" + + with pytest.raises(BrowsingError, match="NAME_NOT_RESOLVED") as raised: + await read_webpage(url=url, question=question, agent=agent) + + # Sanity check that the response is not too long + assert len(raised.exconly()) < 200 diff --git 
a/autogpts/autogpt/tests/mocks/__init__.py b/autogpts/autogpt/tests/mocks/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/tests/mocks/mock_commands.py b/autogpts/autogpt/tests/mocks/mock_commands.py new file mode 100644 index 000000000000..ab9e961b6597 --- /dev/null +++ b/autogpts/autogpt/tests/mocks/mock_commands.py @@ -0,0 +1,29 @@ +from autogpt.command_decorator import command +from autogpt.core.utils.json_schema import JSONSchema + +COMMAND_CATEGORY = "mock" + + +@command( + "function_based_cmd", + "Function-based test command", + { + "arg1": JSONSchema( + type=JSONSchema.Type.INTEGER, + description="arg 1", + required=True, + ), + "arg2": JSONSchema( + type=JSONSchema.Type.STRING, + description="arg 2", + required=True, + ), + }, +) +def function_based_cmd(arg1: int, arg2: str) -> str: + """A function-based test command. + + Returns: + str: the two arguments separated by a dash. + """ + return f"{arg1} - {arg2}" diff --git a/autogpts/autogpt/tests/unit/__init__.py b/autogpts/autogpt/tests/unit/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/autogpts/autogpt/tests/unit/data/test_ai_config.yaml b/autogpts/autogpt/tests/unit/data/test_ai_config.yaml new file mode 100644 index 000000000000..b6bc7cd94089 --- /dev/null +++ b/autogpts/autogpt/tests/unit/data/test_ai_config.yaml @@ -0,0 +1,5 @@ +ai_goals: +- Test goal 1 +ai_name: testGPT +ai_role: testRole +api_budget: 1.0 \ No newline at end of file diff --git a/autogpts/autogpt/tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip b/autogpts/autogpt/tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip new file mode 100644 index 0000000000000000000000000000000000000000..45b515adc18a6bfb9cb6700668bd3718de3f3cfa GIT binary patch literal 15385 zcmbW81#}c!*0$sBZp1xAh>;L?B}SaMcU(H|PTYkEad#)~O5BCGyAmVvhs!Vn_qzkL z{_b^pcdyk|&)#+F*sk};Nr6LPfP8!9N7XC;_2pk51W#7L9%w}`CZ|X*XKrr{u%uTs zv;)#x=-L4dZ5ieMbtDYPMn$ypJEGxZKTr@5V{i}&~Eyf 
z38i_T4$9U9zd>dsAcwwsRLh0BnIa@Ir-D9`%}7+H%5yoY@-3RUE!=5)w9$v9Walh1 z*}6WL0_PAVkXd{%5`5P|ijV*{n6C4ek+Aa@ zG7!tut#o*5(;KED;1-ba)Cy7v9Oe_@wy$VOb+nI7tRc*%&}(ZU*I;}AO}luXxg|6N zAah{+u+sDkg!7F6;4%>Pc$fSEnizg%+k=iqYtTX3iXOCV42kdo@$SlMuESjJ0z>JP zpz^e$`uFwzvU!>o42_T_{>{V7^CA+(hc4$n`nL%k!$tP0MsQ~p!vcBz*W%lsTwh)% zA9ZfCJP=w+r!c=zFLv*{@qYyWR*`UjRr+7P4d(^*rV3fZ5|Ze*@&X2|inPXMWd z7{z~;Cr@V)25+(4W|t&8L_VL90HJOSRE z7htrYP?)ZRkQ~iNb@s$p zNMyP`gZ%2UQQ;YX`_o;G3=IN8`6ri+0YHGUrIoFrh09v0EG(*+9{MNr zB9MxpL&%JQ3aX%j^%u1hODA6lxac)dkZp|9POk?qf}NU-OxZ@-UomH*6I7-ij!5#k z->V};<{8z&7b&}BE|!{=ek%8vGH+pF7lys!vq#F|zRi`x36B6t4I7edB}ce)#svN z@|m-UL!q=oMXKHw!;16GNE0-F=ib+6%zN%S+%AP9u1UEl^*YwZCUPM{Fpauad2`4P zv2QtbDLhR!00^U-%vDOd;H;2QYf;k#hz$^f?Iih@Z?x*|9+wb7`DY;zbnI3H0|o=6 z53#^;7UY5>O)d!qZ6t~1gu)LD!XAAhARq+=GMKoy+uAs%?27NbjZk7<#i9+Q>j-Yu zw#^|a!wY3>e3{o<;dtmyxHgs@5fqat;0Z(lz(Ur)lu5v>Qtt-rJccJx#hn^FsAGwR zf5jK~wXPR_Je1>=^@FLjT@L+7TrY5fCU)&;Mz{KG?W?b-O7XLri9Mh_=BAC zhD;JZfwlL1vwa@9VAN`>Hq=zS#ZxZ4u3vNLnhvH2D_Lq*UD-frMpYaqoLiW{lx(6^ zE`_nnkO_A9cv07+l+Rlb2KcHbC_({KrTN*dG%0r5_H3PX1bszky26Y_}wZg`d0jPri8&cgE(Iayo$aQ7h_M8h3K95 zL~T`XnS{HjJd<9mIgYzAx4*BM+G;dacV~Jut9zVv7-}hUyhZ-?s&Hnl7D+u_5#Oio zk>yWaqqVIey}p%&1pxSDz6}@*&bk)n-SR`GeoQDWS2R#ACZD2ruw3#-?T9j;Q&3jn zNr#eW6_+C~oA~Zy)m=<;?l3LOwn%e3Fkm=>AmrA1CFR45#49E3wQP*fJ^#2QbwP&i zvYQ2#sYk6dd(JbmO1Ko|WePzCBV;ZDDRz9I{j7=_rpn?_gT*kIZF9WPYjajn@IO`IXo-H9lpmV0Y*VW@Q#T&St?HHO2`ja;Tb(=|Eiqx2IQU zfEXtl^ET#f-v#0=VmO9w@El=Se6{!6i>Wd{CiSd^8y9SG-8q%#a&Ax|^zObz&A3$l z^)(>RQt~z%O3D+Bp7$$==<_{fet`y4w zPrdOvwEnC8im+Hp~n|;vz&$ZYgdp0HgGN zasrB3A}BSr1m)$D7e4k8_p=DER0ksm<-*u&s!QXqc0_Hhe4d^if$gY$FS}E8q)*}1 zTS%Q4GrL*6!*sWAQxF<)bhf#ti>AP@aadH*iP3%ZS73O~Zl*|b%NQJ+E{I!2ad)b7`w31+;@Pa}Osg4Lv1?Hu8~orx%w=jGv|g3&ytrMa;Y(T|A6BArCx~BP{`k*{W>V+c{g~$vVB-vfvCg) zW(AY{LIxKsGhtBmwLXuhLj8wUsBdnlYpJbkZq8t5 zViEEJ$%_~T!c|+~H5l)1ru7O1?M7S2atI^@T0?3-lD3}bPPJggTR2t8dP}eCq#*(r zxwy%l=FA0uqm5Tl4Y7;*Iz2cXc|?4_`eVG42XF=h0bzsq^ZZ*{LP$jBjfkPbr|@JZ 
zjK!~XrIkrhqt!`A4R$^(6>vDX#^cXn6IX&)`q;y^j$L09rWPt>_swj7Na(Nk8Dw?2 zNQ#W(@#L^O0Ow{KIl2Av1)q9<4>jJ>lvRAh~ZPCvP{&@H)7~?k{E_YH z$BAFc8tI@)dM)K1m7^kJq^x8gbt(YkuPucoc#f;F=#nu#cPUp40vsK14j{8Kb}QIk zL_Ny9>nOU_@iZe?CUUL@EfIyEf#%@T`r@(q)dgoI-O4|MVfw9S@Ooww^7TnB;flHD zFjlaMChLSq;T5GEY8RLyZ5Z0&^G5=`kW+2$8;$8eByiy(wMF?vyR{!KOyGFR6&s zw$vX2jv_9&tQI#;&eD*EHsFkA9IHuHN>P`SZW6w54PlmxFnxaXd5D6apbGv2d8ugz1T?qy@WDozX zpaeH%qC#QsUbxD0P+Dh9cr~dgDv141eA4KyW}l(wc2g%eA}Gvhb(pRwf&(T28kh_t zsK+bVXMbx+sSFuSmFOTKee570;jOFPIxET}Uz(sDlaOr}zaitTaD)t#kwrU1Jg$cP=NyX3Up3MNv z%h_aOOJ=;{aqv}k2gzxN042A@)L1xtZ)c?1qC`R{Uk?U?WKC9=|;;s~V(=BQ!@*E=X^0UyLky!KV4Z@`&fR94D4PD?0{swM;)Xjd`Kr}>Fx_@_M&U*n?_Hy0y#RPhyGs4Kl* zBK3;Z>1w8|bFm~f65HOVCZ@QFhLvGpK;li-$nTs;NZ8J!Dsx%#`Vc-~kIs) zL?wg;RSXnidFSR@KSp9nm>0VX?c(k#tQ#v{#!L~0tP8ZsCsVuJ-NI&eRW&S~57zuR z1G!Ku7|N<0@gPNu+Ie(Yw~#>%8|sWk7?3V|%}g%I4h!hFE(a~pJQ<47Qst^* zOWhe8AWkRYpt}O!FQ;TyQc~27oQx~UQj6&zJR7#IF;QlshetTC-?hx&r-gh;DLF`h z6)i0C=IyL*>=t0LGpSH5BZO7Tg_t7- z_jAv`NjPd!@W&_wM8AQEFH4!64{0(+}wcc$~%=I$PdqZCA<%xxj zqSX$pl_yj6VnUK$XWx|N($)?(%%WP$8;e+b$LdXPnOLI0N|cDyaDpCWEy;K`g1tquZ-%`nO?2|!G@)085-@)~ z_iO~@rhzTe!KZ5P5K$!N7^fClgs##Uae?UwW&fJnSu(T8k8B7j^0k_>Vjy`GR|=P% z8GgxyMXUp)I<^Sw%a`(VgZ#l+U6#XT`O>4Dd-(b8GbwyP$DyHMQkGY}8Df4>B8M7~ zkffWlY$8(>BoTL@Eq0h<<(lZ6`RpjXSQ>|kk7hk}U?-PJi!tR>2$#@smM3*m_P#ys z%qCX)DPZTNL7%&_N8ljL7C!|s5>i&U~L96 zm$cRoOj(G<1AGR_aQL|#-;ANI4mbE5)TA~po=iL3&0J)F`|XXN_s{L#Y(id%GDr{2 z(T*d0WWi)H5euDcXA_ozPQ;HIuNQ41l0=iR6EfvaKRu;0ETVILePifw3FZFMo#o@(xNM53?^$26X zume;MDsXlfDrjDt<0Wprj^_kY2MWd$q8)!2R0D)v%kb};dcjFOP3={g+XO#~MrnZ} zG&R?h-CifC2Q*7#OI;(hjWMerFtXVY_BM$H4?Ing$YqN-u+)|ja0*uteF!7@WdI-F zMwPgCMtE-^rkd{fIuW~OM>>;c#=TaD5tmBYxA&Z+E{mSBJeXip$scI_=`;SkYe^W6da z{D(CLh~>v6_ueMBH)H|+#mb+&hvKad8aw!AriK84g&h*;jYLcYxr3>0&V@{XDaHRXmxk zDPEze4kY^>XsF8QwYB65-&BM@(hd8izzWNuF8X9AX%_dG0kwTa>5eo`(6=i<#`gj1+`~zJ zq!0}t1aTTy8JSu|LK{k=N($oruyTxXhf&2LAnP8V&^cY(>l3VnUyBXMog_pv7aoxm zK{Wy7xI@HRG9Av%g!{-zF5!*}t*c5Gt})(ozY72$_KAZKT5m{xJ!L5F#93qSXO&xR 
z;p5O|jMAaPu|bc3WTkj7v;WGKwX57W+b!IIyMa9Q1ozdtKwt|gYU7?iSdRMVaL+xy zujP*i2l{qWuZLxIytU&T*<2(?LXz*oTU%CqNe~8R+MgW;gEE1;PrEU@Uv>&y+!ql6 zFX$eQ#E6<)XwA$^%R;qnz3eMiT+ zvzVxFl+1>h5$vuRKOUS=uGALaT6x9#4YY4CmL!6`7m!vIj`7L`?nviFB!pmqB89o> zvfdll`MNGO7rzm>B5+Rub7k=K5D z#eat*H=suu1@*j@>TI`Je4N1z$h!MsDkU-)XQ5B^W0TYR4n28lm&CbLj51I-ARZ}v zj1+EJcxhsHho(#Nwvh}r^utovhjHjTl2cfF#`GPfs!R{QoT{5lt?^wv@d{VzcPOaV zb&0#x8nzxXSMc>wwgMR3do|>%9N`e$XMzK946R(r1RlJccWYDnGR6ar86aT#KJVNMCe^*dH&HKO zu7;aPZ6`x#dK@rnQI-0vQ!GC1u}iy}_In9bG~!9*1NmKo(ZVio@wa$PyaoVR5}RIG z%oyvWud^vMf~}hID;XL*G8&(I#k|U^U=omfS8t(^k7aK_KAE}}o46mw8jl9!uK)QY zl0MyNdrj`%XFR@iJ{mDRFqUG8-}U0Ei2F;zgN4S0f;4IG=Z|DOW8-{JE39WLV_K(g z@}>=5<1)R>rU^T4!xInN;EZv>^x{VXfn@j3=XV#Eu?g(d^ctJFv(84R(+eMD^`OOC;$}VB7b(BGGn9i(pS=hp>HiV2cf;^^cKw5G`q&*8}x_z2su{ zNYB9MGZN~?qb-2}mB@}1!&&oXCcloUeie-dhYPYWYvLRmI-!h1b?+@wyu~R2BU{dH zmzsbksO{%LYu0C9^5w7 z?UiZ_%NJgLcO)BxsW7Lj1*GmspltF9G_B@2Wv1bxyKfxFD-imwHIH~LT>GH&jQsF) zDgu;_nnrP)){K*LDA?^B>;fUlG^Y|-;vEjKIfd=4D2dGM>|X81zOTMs{3E;JS$=g} zXJ96_Z_hHe@IYmp-AZk)5F)xnkKRiY728um3)$NWe?%IQ4IeGDjqDOO2(n^0llrh8 z(%IkN-~Oq8R|R`fQ3TJcMk_L8^%xVt^_jbh;=r;&t`w>!f(8jCAmr2edO{ki3GCpx zG@~kt(ikOV^40rRRtSL`C&=wD&)C8}&Z-N-UJpwGaE}#Ry5HPjEkj*Phh}~Or}l{; zXvg=DJ56#6a=N}c@fb+nz&0YpP9D2rXxyAAWra zUFh=h7G{WYm?U}1bP-T;IT3|RQ?_GH4`4k<#NV2V(mvcPCQ7pvTuy88pvsmxc3K3E z)_7e8;jcRw7a|HwYKEeDmQo-i^S!kFXuT5GOPgWlsF__oW05^=I7=N22}~XqC!x1o zK3X1p-bJWO67d~X@2AW*n2^AjFI~ey@K;rnLSp*iaQOi-b-6LBi(-t>s^E|TKuvv8Od5y z_Q#qd#I0`Jwc432k)T9j{Fqp!3_9bJLw6M}$V8%XO+F(=_?orR@QIrIQsFeWr7BCF`ch2j6&H&%9?KXe6QW(vZmH{8Hv#j^y6z!^`g^?ctQtw zqz&3L-Mq5z$RKkTQ>7CWb4hGj%Y~bUBebipl1C83VRI$iX2p_RqaR`xqR9DSkb+tI z*jY5{EFV{fZbH^4LQ?J0Or;QxQi^tzW-dcr7uv3v^9xKR^Z$%wFa1v8@c2)7MmL%*#n&!(I5Q)qjm`+p-xRP@m{vrJ*{of zPH%B`b;rocb8kF@F;{9U2rq|eLqx)htkMW=NEFU)`b5N?w82pBgo|X1 z0iyNn^L$Ri!z5DXGi9L$M)wh8rh7I@uW;*V2iboahzpCejnMt6epzbkzKWCL1u?$VsdpD@PXu`5rXRaaX+ zzd~mTCX6;44RN!av)A559;@1TLgtrlwXjfv zITJY_YQtjMJsw6ap0}1vE{mC34WXF;k*@KDX|H}tUX|s8sg#WiHrtyGTQxmL8KnRs 
znxm8~ymN1wSLR$z63EJ`=Z@`n%uPdeAGH#;5+-Kysjxe^w=9XBka;R_d+$1SWj&l0 zF49m?KfF02ryAttpQYU*0G=JLZXBFlU*sgN`K6KN_&cU1V*~{XoI12_CF9=W>_Z{3 zb*#ReaoV)nbiR)P*_0@28zb@8$9fOqM!>uej~z{UgnM+9R?f};u%E*L@mh81>t9-w zZ*rvXqN3WmIW^3uY<1q#bF4o-!?JdMQl6L^>H`^oRu<-|3YumMOc?GnWhi=)f!)E4 zq0v~CC5r>fCID4IZDw?go1SS(sU%*(j#@?=&QNw@8Pw=Ai))xM>BO~bvCgm)(Z*Q%xZa^w0+${jaN+fY!7akX$4Y6jRo+s> zpoW^aLmR#h+2r2F4%LycaDN#gcN_k0Yf_tT@8umF~R@MlBG5G0j@}2+C`|Ww9wTkMqi&w;yEK<`u`opHGBCD*cTnOlE=BS!a zgt7|H+%Wc0nj>Gp&lZuWcLp?bs#y*0zSA<4$&-AKaFdFKs%qm9PgN!)F#B@BtFuHE z+Yd+cBVpKZW`tG}%d5R)9wNOq%Qqs}Er>Cm&w9oEBZ;dELZ(eyrp1*}2FVQYq%qC1 zQ>Dc*T6_{y?dk|V-d~dvVF=?EyqR<1xo%(7a(81PE)SvBglZ(~l0jE2C(H_yG><(> zx7nAo1mKvO7R%@45^u{I-kSJ{8P2$rc@|t5qk3SUmpBNYK8d5bBp(h{H>7u z*#kQhG7?5TfF+}zuAK>-ogt8z-q7BNnBI!m8enZ`qzf>I`>vQGW`MJH2AWt|65HBa zYJX3LKWVdmeG&xUym$h9f`&X5ia)>=LyG{)F!)Bg)-Tcei`Y)^-2j}ADP~K)x9a>Sq+i`;u6k}P z`{b_PQ+fUew`~n=>;X?wEJI769Rtt_2-z-x)X4+`(Z=O@+PvIOV=rIp)uAr$nf4x* zN9J@mt5GETkshV`mP3<&U0{&qmTI^B>?i$E56B#z zUmZL2sX)Aaa;V@*5Ai46+%MH)5%I#R^GV<4=mDu$nWfXHq*NczErQ$Nyp!1NFJ%D< z&J)glb4B4$q$)2fxEAiZ>fBcP`qye#LNb%~jVjx@oLDkgF=LX@P>4C&8O+SR=C?7K9r5OyqgFW(_K6FHrk zD_>7y1#LeHJ^Z^pg8Zc*eY$r4ei1)?`&_t{`TLY{Q6a~DQiIFKM96Hm>O>;L0y+S&k108m@|snt!XErfp)0v@X*wnU2L z_{_wCkdUCDz03&4_9KmvS>QJfoLxP;8k`M3G^k>wk$^-sM|a&C?C#a?Rbxm<7AnPg zn1I&7mrIHhX&!-G=nv*8scCv#TQmx@4bh~Ckbb1Kt1!N)h>bIn?JmP~jW>4b*4l)7 zY@xSjfMirQf0F>YGE#=ddt$0v!?~98RUvO$v~X1wOT)Lf(}VSD_`*_STkjOfUg{nU zLQx~@Z3Nz|!Q8I87Clx-@rtB#F|xBV{D%ucZB0fTEu7RF`+}VzFKpJAZ)Yo5heS#z zSmosoVEQ!+88ReGr&}He9HTZ~Vcx;ubXK;vSZQiFbhQZOFufOk|Fm-9wB#5I7arE;E z-0(8QDy$x`#+W)V2-i|>3HouTz(+-|`?!BJN+lKI+^t3==}J)7J#)mPU0+`+^kKn; zP&te=Qyc(>X|~Q9wC>&$iLe<0O! 
zK4y_L3{Zw2^)exEi36j|QOn*V(+i5|+r_*d+!&S$HNYA=_+%4OJk-t5!RxC{U(@1m z+0|e~I}7n7+tK%NIOOqR`yK$!EZtV&@h1VgB?4WN8q_UOqQ*Zw#C7aY&v?k(b?)-t7|^+ZV}YzQN=aRjm| zLNE^VA1O#7AFZPMy+awqRPz}JNSQgvB48f4Dec*)-qsA0jGhMuuy z!3mn+xx0g}FKLM|_a<9(1rg6SnJaF{i0oXs-rv<%%`Rm7yWr$ps6!)opEg;nlzE_eIDC1?c> zkATD4ZS!!KA!6-x!fGDCwfon6Jbx3nL6*6D=_#$1UW)S#)R~E)?!GDcJjLy;l-xD1 zl(Y|IEaX*;aXqVE=#!!h!8JZ`+ED`yH(IOs<^hyc1~}_|laX=#QoRzaC4<$aRP)8o zTx7go5eC-EX^Sc5uazgnE>Q4mU6zN%(b{q22L=e!@cBzy*wd+6r-+qbE?Z=#8mU8G zJ9|7J`}7P>E`uTN!0@WxT#hLbci=zA=p=P?aQ1zX2p#4|EHHtg^C(B-@i)~u45LVb z=u=1gt^8>=Vh40KH~cmq`%7m@%s@=}U20BDPjBd?Z*Fg3$V19NtHDT1Z2+(Xx;YqH zI=C_X^$Gm8eZkhy&fXkoNAvUt23k^LhHu08f97O~&4$zRPfmO*f07w|cjVV;8Qi~| zg8NJT{*y~o|Eo(>-&_I(;{*9`+cLhn_3ef7-TZc-gaaY|>tCQCI3O2LWES`lbD%$L z6Fz`6XkJg}dG%C&H!T0OvEyejNKY#kt?eABuGe?q5BnYbpKJf?)`OqH z;7^Od--3VMi15Fo|FhEkbzj2IXx%@dzt6e<396R&J>DPo?-%;lodrKbF`gE@zm50b z`waex3mpE={bB#k{Z-8TGZ*)1kHc@d|CToY6W8<0@43Iqg@5KU!u)~z&*I_VIq}1p zzX$)TF7{_9KEeLMiC)^`UmpARDk~k{SodD`*-NClC7Vi$xnUP@9wi70>ob-J>p+;3O}2-^PD8+TWwUEtLO> zh~L|2^KCBhzYv*zAvXLEMBTqFuK&q{!E)>W;=w;H!GBzC;-{El=>8n@-#z$gLH@Hd zYI6`W-!21noTlwJySx0oBEDC^f7;lu)6*Z$cs)7e{FH3?UETkCvidXjM?3p56Z>KR rj{S8q`ZE^n#s7l+=hpp$_QT3aK|bvf0Rh2!dRsol9QSl?5Rm@|$|(%n literal 0 HcmV?d00001 diff --git a/autogpts/autogpt/tests/unit/data/test_plugins/auto_gpt_guanaco/__init__.py b/autogpts/autogpt/tests/unit/data/test_plugins/auto_gpt_guanaco/__init__.py new file mode 100644 index 000000000000..eb024b37c072 --- /dev/null +++ b/autogpts/autogpt/tests/unit/data/test_plugins/auto_gpt_guanaco/__init__.py @@ -0,0 +1,274 @@ +"""This is the Test plugin for AutoGPT.""" +from typing import Any, Dict, List, Optional, Tuple, TypeVar + +from auto_gpt_plugin_template import AutoGPTPluginTemplate + +PromptGenerator = TypeVar("PromptGenerator") + + +class AutoGPTGuanaco(AutoGPTPluginTemplate): + """ + This is plugin for AutoGPT. 
+ """ + + def __init__(self): + super().__init__() + self._name = "AutoGPT-Guanaco" + self._version = "0.1.0" + self._description = "This is a Guanaco local model plugin." + + def can_handle_on_response(self) -> bool: + """This method is called to check that the plugin can + handle the on_response method. + + Returns: + bool: True if the plugin can handle the on_response method.""" + return False + + def on_response(self, response: str, *args, **kwargs) -> str: + """This method is called when a response is received from the model.""" + if len(response): + print("OMG OMG It's Alive!") + else: + print("Is it alive?") + + def can_handle_post_prompt(self) -> bool: + """This method is called to check that the plugin can + handle the post_prompt method. + + Returns: + bool: True if the plugin can handle the post_prompt method.""" + return False + + def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator: + """This method is called just after the generate_prompt is called, + but actually before the prompt is generated. + + Args: + prompt (PromptGenerator): The prompt generator. + + Returns: + PromptGenerator: The prompt generator. + """ + + def can_handle_on_planning(self) -> bool: + """This method is called to check that the plugin can + handle the on_planning method. + + Returns: + bool: True if the plugin can handle the on_planning method.""" + return False + + def on_planning( + self, prompt: PromptGenerator, messages: List[str] + ) -> Optional[str]: + """This method is called before the planning chat completeion is done. + + Args: + prompt (PromptGenerator): The prompt generator. + messages (List[str]): The list of messages. + """ + + def can_handle_post_planning(self) -> bool: + """This method is called to check that the plugin can + handle the post_planning method. 
+ + Returns: + bool: True if the plugin can handle the post_planning method.""" + return False + + def post_planning(self, response: str) -> str: + """This method is called after the planning chat completeion is done. + + Args: + response (str): The response. + + Returns: + str: The resulting response. + """ + + def can_handle_pre_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the pre_instruction method. + + Returns: + bool: True if the plugin can handle the pre_instruction method.""" + return False + + def pre_instruction(self, messages: List[str]) -> List[str]: + """This method is called before the instruction chat is done. + + Args: + messages (List[str]): The list of context messages. + + Returns: + List[str]: The resulting list of messages. + """ + + def can_handle_on_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the on_instruction method. + + Returns: + bool: True if the plugin can handle the on_instruction method.""" + return False + + def on_instruction(self, messages: List[str]) -> Optional[str]: + """This method is called when the instruction chat is done. + + Args: + messages (List[str]): The list of context messages. + + Returns: + Optional[str]: The resulting message. + """ + + def can_handle_post_instruction(self) -> bool: + """This method is called to check that the plugin can + handle the post_instruction method. + + Returns: + bool: True if the plugin can handle the post_instruction method.""" + return False + + def post_instruction(self, response: str) -> str: + """This method is called after the instruction chat is done. + + Args: + response (str): The response. + + Returns: + str: The resulting response. + """ + + def can_handle_pre_command(self) -> bool: + """This method is called to check that the plugin can + handle the pre_command method. 
+ + Returns: + bool: True if the plugin can handle the pre_command method.""" + return False + + def pre_command( + self, command_name: str, arguments: Dict[str, Any] + ) -> Tuple[str, Dict[str, Any]]: + """This method is called before the command is executed. + + Args: + command_name (str): The command name. + arguments (Dict[str, Any]): The arguments. + + Returns: + Tuple[str, Dict[str, Any]]: The command name and the arguments. + """ + + def can_handle_post_command(self) -> bool: + """This method is called to check that the plugin can + handle the post_command method. + + Returns: + bool: True if the plugin can handle the post_command method.""" + return False + + def post_command(self, command_name: str, response: str) -> str: + """This method is called after the command is executed. + + Args: + command_name (str): The command name. + response (str): The response. + + Returns: + str: The resulting response. + """ + + def can_handle_chat_completion( + self, + messages: list[Dict[Any, Any]], + model: str, + temperature: float, + max_tokens: int, + ) -> bool: + """This method is called to check that the plugin can + handle the chat_completion method. + + Args: + messages (Dict[Any, Any]): The messages. + model (str): The model name. + temperature (float): The temperature. + max_tokens (int): The max tokens. + + Returns: + bool: True if the plugin can handle the chat_completion method.""" + return False + + def handle_chat_completion( + self, + messages: list[Dict[Any, Any]], + model: str, + temperature: float, + max_tokens: int, + ) -> str: + """This method is called when the chat completion is done. + + Args: + messages (Dict[Any, Any]): The messages. + model (str): The model name. + temperature (float): The temperature. + max_tokens (int): The max tokens. + + Returns: + str: The resulting response. + """ + + def can_handle_text_embedding(self, text: str) -> bool: + """This method is called to check that the plugin can + handle the text_embedding method. 
+ Args: + text (str): The text to be convert to embedding. + Returns: + bool: True if the plugin can handle the text_embedding method.""" + return False + + def handle_text_embedding(self, text: str) -> list: + """This method is called when the chat completion is done. + Args: + text (str): The text to be convert to embedding. + Returns: + list: The text embedding. + """ + + def can_handle_user_input(self, user_input: str) -> bool: + """This method is called to check that the plugin can + handle the user_input method. + + Args: + user_input (str): The user input. + + Returns: + bool: True if the plugin can handle the user_input method.""" + return False + + def user_input(self, user_input: str) -> str: + """This method is called to request user input to the user. + + Args: + user_input (str): The question or prompt to ask the user. + + Returns: + str: The user input. + """ + + def can_handle_report(self) -> bool: + """This method is called to check that the plugin can + handle the report method. + + Returns: + bool: True if the plugin can handle the report method.""" + return False + + def report(self, message: str) -> None: + """This method is called to report a message to the user. + + Args: + message (str): The message to report. 
+ """ diff --git a/autogpts/autogpt/tests/unit/models/test_base_open_api_plugin.py b/autogpts/autogpt/tests/unit/models/test_base_open_api_plugin.py new file mode 100644 index 000000000000..7a8522e7fe52 --- /dev/null +++ b/autogpts/autogpt/tests/unit/models/test_base_open_api_plugin.py @@ -0,0 +1,81 @@ +import pytest + +from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin + + +class DummyPlugin(BaseOpenAIPlugin): + """A dummy plugin for testing purposes.""" + + +@pytest.fixture +def dummy_plugin(): + """A dummy plugin for testing purposes.""" + manifests_specs_clients = { + "manifest": { + "name_for_model": "Dummy", + "schema_version": "1.0", + "description_for_model": "A dummy plugin for testing purposes", + }, + "client": None, + "openapi_spec": None, + } + return DummyPlugin(manifests_specs_clients) + + +def test_dummy_plugin_inheritance(dummy_plugin): + """Test that the DummyPlugin class inherits from the BaseOpenAIPlugin class.""" + assert isinstance(dummy_plugin, BaseOpenAIPlugin) + + +def test_dummy_plugin_name(dummy_plugin): + """Test that the DummyPlugin class has the correct name.""" + assert dummy_plugin._name == "Dummy" + + +def test_dummy_plugin_version(dummy_plugin): + """Test that the DummyPlugin class has the correct version.""" + assert dummy_plugin._version == "1.0" + + +def test_dummy_plugin_description(dummy_plugin): + """Test that the DummyPlugin class has the correct description.""" + assert dummy_plugin._description == "A dummy plugin for testing purposes" + + +def test_dummy_plugin_default_methods(dummy_plugin): + """Test that the DummyPlugin class has the correct default methods.""" + assert not dummy_plugin.can_handle_on_response() + assert not dummy_plugin.can_handle_post_prompt() + assert not dummy_plugin.can_handle_on_planning() + assert not dummy_plugin.can_handle_post_planning() + assert not dummy_plugin.can_handle_pre_instruction() + assert not dummy_plugin.can_handle_on_instruction() + assert not 
dummy_plugin.can_handle_post_instruction() + assert not dummy_plugin.can_handle_pre_command() + assert not dummy_plugin.can_handle_post_command() + assert not dummy_plugin.can_handle_chat_completion(None, None, None, None) + assert not dummy_plugin.can_handle_text_embedding(None) + + assert dummy_plugin.on_response("hello") == "hello" + assert dummy_plugin.post_prompt(None) is None + assert dummy_plugin.on_planning(None, None) is None + assert dummy_plugin.post_planning("world") == "world" + pre_instruction = dummy_plugin.pre_instruction( + [{"role": "system", "content": "Beep, bop, boop"}] + ) + assert isinstance(pre_instruction, list) + assert len(pre_instruction) == 1 + assert pre_instruction[0]["role"] == "system" + assert pre_instruction[0]["content"] == "Beep, bop, boop" + assert dummy_plugin.on_instruction(None) is None + assert dummy_plugin.post_instruction("I'm a robot") == "I'm a robot" + pre_command = dummy_plugin.pre_command("evolve", {"continuously": True}) + assert isinstance(pre_command, tuple) + assert len(pre_command) == 2 + assert pre_command[0] == "evolve" + assert pre_command[1]["continuously"] is True + post_command = dummy_plugin.post_command("evolve", "upgraded successfully!") + assert isinstance(post_command, str) + assert post_command == "upgraded successfully!" + assert dummy_plugin.handle_chat_completion(None, None, None, None) is None + assert dummy_plugin.handle_text_embedding(None) is None diff --git a/autogpts/autogpt/tests/unit/test_ai_profile.py b/autogpts/autogpt/tests/unit/test_ai_profile.py new file mode 100644 index 000000000000..a60de4d9db18 --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_ai_profile.py @@ -0,0 +1,71 @@ +from autogpt.config.ai_profile import AIProfile +from autogpt.file_storage.base import FileStorage + +""" +Test cases for the AIProfile class, which handles loads the AI configuration +settings from a YAML file. 
+""" + + +def test_goals_are_always_lists_of_strings(tmp_path): + """Test if the goals attribute is always a list of strings.""" + + yaml_content = """ +ai_goals: +- Goal 1: Make a sandwich +- Goal 2, Eat the sandwich +- Goal 3 - Go to sleep +- "Goal 4: Wake up" +ai_name: McFamished +ai_role: A hungry AI +api_budget: 0.0 +""" + ai_settings_file = tmp_path / "ai_settings.yaml" + ai_settings_file.write_text(yaml_content) + + ai_profile = AIProfile.load(ai_settings_file) + + assert len(ai_profile.ai_goals) == 4 + assert ai_profile.ai_goals[0] == "Goal 1: Make a sandwich" + assert ai_profile.ai_goals[1] == "Goal 2, Eat the sandwich" + assert ai_profile.ai_goals[2] == "Goal 3 - Go to sleep" + assert ai_profile.ai_goals[3] == "Goal 4: Wake up" + + ai_settings_file.write_text("") + ai_profile.save(ai_settings_file) + + yaml_content2 = """ai_goals: +- 'Goal 1: Make a sandwich' +- Goal 2, Eat the sandwich +- Goal 3 - Go to sleep +- 'Goal 4: Wake up' +ai_name: McFamished +ai_role: A hungry AI +api_budget: 0.0 +""" + assert ai_settings_file.read_text() == yaml_content2 + + +def test_ai_profile_file_not_exists(storage: FileStorage): + """Test if file does not exist.""" + + ai_settings_file = storage.get_path("ai_settings.yaml") + + ai_profile = AIProfile.load(str(ai_settings_file)) + assert ai_profile.ai_name == "" + assert ai_profile.ai_role == "" + assert ai_profile.ai_goals == [] + assert ai_profile.api_budget == 0.0 + + +def test_ai_profile_file_is_empty(storage: FileStorage): + """Test if file does not exist.""" + + ai_settings_file = storage.get_path("ai_settings.yaml") + ai_settings_file.write_text("") + + ai_profile = AIProfile.load(str(ai_settings_file)) + assert ai_profile.ai_name == "" + assert ai_profile.ai_role == "" + assert ai_profile.ai_goals == [] + assert ai_profile.api_budget == 0.0 diff --git a/autogpts/autogpt/tests/unit/test_commands.py b/autogpts/autogpt/tests/unit/test_commands.py new file mode 100644 index 000000000000..a939ec4d207e --- /dev/null +++ 
b/autogpts/autogpt/tests/unit/test_commands.py @@ -0,0 +1,239 @@ +from __future__ import annotations + +import os +import shutil +import sys +from pathlib import Path +from typing import TYPE_CHECKING + +import pytest + +if TYPE_CHECKING: + from autogpt.agents import Agent, BaseAgent + +from autogpt.core.utils.json_schema import JSONSchema +from autogpt.models.command import Command, CommandParameter +from autogpt.models.command_registry import CommandRegistry + +PARAMETERS = [ + CommandParameter( + "arg1", + spec=JSONSchema( + type=JSONSchema.Type.INTEGER, + description="Argument 1", + required=True, + ), + ), + CommandParameter( + "arg2", + spec=JSONSchema( + type=JSONSchema.Type.STRING, + description="Argument 2", + required=False, + ), + ), +] + + +def example_command_method(arg1: int, arg2: str, agent: BaseAgent) -> str: + """Example function for testing the Command class.""" + # This function is static because it is not used by any other test cases. + return f"{arg1} - {arg2}" + + +def test_command_creation(): + """Test that a Command object can be created with the correct attributes.""" + cmd = Command( + name="example", + description="Example command", + method=example_command_method, + parameters=PARAMETERS, + ) + + assert cmd.name == "example" + assert cmd.description == "Example command" + assert cmd.method == example_command_method + assert ( + str(cmd) + == "example: Example command. 
Params: (arg1: integer, arg2: Optional[string])" + ) + + +@pytest.fixture +def example_command(): + yield Command( + name="example", + description="Example command", + method=example_command_method, + parameters=PARAMETERS, + ) + + +def test_command_call(example_command: Command, agent: Agent): + """Test that Command(*args) calls and returns the result of method(*args).""" + result = example_command(arg1=1, arg2="test", agent=agent) + assert result == "1 - test" + + +def test_command_call_with_invalid_arguments(example_command: Command, agent: Agent): + """Test that calling a Command object with invalid arguments raises a TypeError.""" + with pytest.raises(TypeError): + example_command(arg1="invalid", does_not_exist="test", agent=agent) + + +def test_register_command(example_command: Command): + """Test that a command can be registered to the registry.""" + registry = CommandRegistry() + + registry.register(example_command) + + assert registry.get_command(example_command.name) == example_command + assert len(registry.commands) == 1 + + +def test_unregister_command(example_command: Command): + """Test that a command can be unregistered from the registry.""" + registry = CommandRegistry() + + registry.register(example_command) + registry.unregister(example_command) + + assert len(registry.commands) == 0 + assert example_command.name not in registry + + +@pytest.fixture +def example_command_with_aliases(example_command: Command): + example_command.aliases = ["example_alias", "example_alias_2"] + return example_command + + +def test_register_command_aliases(example_command_with_aliases: Command): + """Test that a command can be registered to the registry.""" + registry = CommandRegistry() + command = example_command_with_aliases + + registry.register(command) + + assert command.name in registry + assert registry.get_command(command.name) == command + for alias in command.aliases: + assert registry.get_command(alias) == command + assert len(registry.commands) == 1 + + 
+def test_unregister_command_aliases(example_command_with_aliases: Command): + """Test that a command can be unregistered from the registry.""" + registry = CommandRegistry() + command = example_command_with_aliases + + registry.register(command) + registry.unregister(command) + + assert len(registry.commands) == 0 + assert command.name not in registry + for alias in command.aliases: + assert alias not in registry + + +def test_command_in_registry(example_command_with_aliases: Command): + """Test that `command_name in registry` works.""" + registry = CommandRegistry() + command = example_command_with_aliases + + assert command.name not in registry + assert "nonexistent_command" not in registry + + registry.register(command) + + assert command.name in registry + assert "nonexistent_command" not in registry + for alias in command.aliases: + assert alias in registry + + +def test_get_command(example_command: Command): + """Test that a command can be retrieved from the registry.""" + registry = CommandRegistry() + + registry.register(example_command) + retrieved_cmd = registry.get_command(example_command.name) + + assert retrieved_cmd == example_command + + +def test_get_nonexistent_command(): + """Test that attempting to get a nonexistent command raises a KeyError.""" + registry = CommandRegistry() + + assert registry.get_command("nonexistent_command") is None + assert "nonexistent_command" not in registry + + +def test_call_command(agent: Agent): + """Test that a command can be called through the registry.""" + registry = CommandRegistry() + cmd = Command( + name="example", + description="Example command", + method=example_command_method, + parameters=PARAMETERS, + ) + + registry.register(cmd) + result = registry.call("example", arg1=1, arg2="test", agent=agent) + + assert result == "1 - test" + + +def test_call_nonexistent_command(agent: Agent): + """Test that attempting to call a nonexistent command raises a KeyError.""" + registry = CommandRegistry() + + with 
pytest.raises(KeyError): + registry.call("nonexistent_command", arg1=1, arg2="test", agent=agent) + + +def test_import_mock_commands_module(): + """Test that the registry can import a module with mock command plugins.""" + registry = CommandRegistry() + mock_commands_module = "tests.mocks.mock_commands" + + registry.import_command_module(mock_commands_module) + + assert "function_based_cmd" in registry + assert registry.commands["function_based_cmd"].name == "function_based_cmd" + assert ( + registry.commands["function_based_cmd"].description + == "Function-based test command" + ) + + +def test_import_temp_command_file_module(tmp_path: Path): + """ + Test that the registry can import a command plugins module from a temp file. + Args: + tmp_path (pathlib.Path): Path to a temporary directory. + """ + registry = CommandRegistry() + + # Create a temp command file + src = Path(os.getcwd()) / "tests/mocks/mock_commands.py" + temp_commands_file = tmp_path / "mock_commands.py" + shutil.copyfile(src, temp_commands_file) + + # Add the temp directory to sys.path to make the module importable + sys.path.append(str(tmp_path)) + + temp_commands_module = "mock_commands" + registry.import_command_module(temp_commands_module) + + # Remove the temp directory from sys.path + sys.path.remove(str(tmp_path)) + + assert "function_based_cmd" in registry + assert registry.commands["function_based_cmd"].name == "function_based_cmd" + assert ( + registry.commands["function_based_cmd"].description + == "Function-based test command" + ) diff --git a/autogpts/autogpt/tests/unit/test_config.py b/autogpts/autogpt/tests/unit/test_config.py new file mode 100644 index 000000000000..70d1b65b01e3 --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_config.py @@ -0,0 +1,185 @@ +""" +Test cases for the config class, which handles the configuration settings +for the AI and ensures it behaves as a singleton. 
+""" +import asyncio +import os +from typing import Any +from unittest import mock + +import pytest +from openai.pagination import AsyncPage +from openai.types import Model +from pydantic import SecretStr + +from autogpt.app.configurator import GPT_3_MODEL, GPT_4_MODEL, apply_overrides_to_config +from autogpt.config import Config, ConfigBuilder +from autogpt.core.resource.model_providers.openai import OpenAIModelName +from autogpt.core.resource.model_providers.schema import ( + ChatModelInfo, + ModelProviderName, +) + + +def test_initial_values(config: Config) -> None: + """ + Test if the initial values of the config class attributes are set correctly. + """ + assert config.continuous_mode is False + assert config.tts_config.speak_mode is False + assert config.fast_llm.startswith("gpt-3.5-turbo") + assert config.smart_llm.startswith("gpt-4") + + +@pytest.mark.asyncio +@mock.patch("openai.resources.models.AsyncModels.list") +async def test_fallback_to_gpt3_if_gpt4_not_available( + mock_list_models: Any, config: Config +) -> None: + """ + Test if models update to gpt-3.5-turbo if gpt-4 is not available. 
+ """ + config.fast_llm = OpenAIModelName.GPT4_TURBO + config.smart_llm = OpenAIModelName.GPT4_TURBO + + mock_list_models.return_value = asyncio.Future() + mock_list_models.return_value.set_result( + AsyncPage( + data=[Model(id=GPT_3_MODEL, created=0, object="model", owned_by="AutoGPT")], + object="Models", # no idea what this should be, but irrelevant + ) + ) + + await apply_overrides_to_config( + config=config, + gpt3only=False, + gpt4only=False, + ) + + assert config.fast_llm == "gpt-3.5-turbo" + assert config.smart_llm == "gpt-3.5-turbo" + + +def test_missing_azure_config(config: Config) -> None: + assert config.openai_credentials is not None + + config_file = config.app_data_dir / "azure_config.yaml" + with pytest.raises(FileNotFoundError): + config.openai_credentials.load_azure_config(config_file) + + config_file.write_text("") + with pytest.raises(ValueError): + config.openai_credentials.load_azure_config(config_file) + + assert config.openai_credentials.api_type != "azure" + assert config.openai_credentials.api_version == "" + assert config.openai_credentials.azure_model_to_deploy_id_map is None + + +@pytest.fixture +def config_with_azure(config: Config): + config_file = config.app_data_dir / "azure_config.yaml" + config_file.write_text( + f""" +azure_api_type: azure +azure_api_version: 2023-06-01-preview +azure_endpoint: https://dummy.openai.azure.com +azure_model_map: + {config.fast_llm}: FAST-LLM_ID + {config.smart_llm}: SMART-LLM_ID + {config.embedding_model}: embedding-deployment-id-for-azure +""" + ) + os.environ["USE_AZURE"] = "True" + os.environ["AZURE_CONFIG_FILE"] = str(config_file) + config_with_azure = ConfigBuilder.build_config_from_env( + project_root=config.project_root + ) + yield config_with_azure + del os.environ["USE_AZURE"] + del os.environ["AZURE_CONFIG_FILE"] + + +def test_azure_config(config_with_azure: Config) -> None: + assert (credentials := config_with_azure.openai_credentials) is not None + assert credentials.api_type == "azure" 
+ assert credentials.api_version == "2023-06-01-preview" + assert credentials.azure_endpoint == SecretStr("https://dummy.openai.azure.com") + assert credentials.azure_model_to_deploy_id_map == { + config_with_azure.fast_llm: "FAST-LLM_ID", + config_with_azure.smart_llm: "SMART-LLM_ID", + config_with_azure.embedding_model: "embedding-deployment-id-for-azure", + } + + fast_llm = config_with_azure.fast_llm + smart_llm = config_with_azure.smart_llm + assert ( + credentials.get_model_access_kwargs(config_with_azure.fast_llm)["model"] + == "FAST-LLM_ID" + ) + assert ( + credentials.get_model_access_kwargs(config_with_azure.smart_llm)["model"] + == "SMART-LLM_ID" + ) + + # Emulate --gpt4only + config_with_azure.fast_llm = smart_llm + assert ( + credentials.get_model_access_kwargs(config_with_azure.fast_llm)["model"] + == "SMART-LLM_ID" + ) + assert ( + credentials.get_model_access_kwargs(config_with_azure.smart_llm)["model"] + == "SMART-LLM_ID" + ) + + # Emulate --gpt3only + config_with_azure.fast_llm = config_with_azure.smart_llm = fast_llm + assert ( + credentials.get_model_access_kwargs(config_with_azure.fast_llm)["model"] + == "FAST-LLM_ID" + ) + assert ( + credentials.get_model_access_kwargs(config_with_azure.smart_llm)["model"] + == "FAST-LLM_ID" + ) + + +@pytest.mark.asyncio +async def test_create_config_gpt4only(config: Config) -> None: + with mock.patch( + "autogpt.core.resource.model_providers.openai.OpenAIProvider.get_available_models" + ) as mock_get_models: + mock_get_models.return_value = [ + ChatModelInfo( + name=GPT_4_MODEL, + provider_name=ModelProviderName.OPENAI, + max_tokens=4096, + ) + ] + await apply_overrides_to_config( + config=config, + gpt4only=True, + ) + assert config.fast_llm == GPT_4_MODEL + assert config.smart_llm == GPT_4_MODEL + + +@pytest.mark.asyncio +async def test_create_config_gpt3only(config: Config) -> None: + with mock.patch( + "autogpt.core.resource.model_providers.openai.OpenAIProvider.get_available_models" + ) as 
mock_get_models: + mock_get_models.return_value = [ + ChatModelInfo( + name=GPT_3_MODEL, + provider_name=ModelProviderName.OPENAI, + max_tokens=4096, + ) + ] + await apply_overrides_to_config( + config=config, + gpt3only=True, + ) + assert config.fast_llm == GPT_3_MODEL + assert config.smart_llm == GPT_3_MODEL diff --git a/autogpts/autogpt/tests/unit/test_file_operations.py b/autogpts/autogpt/tests/unit/test_file_operations.py new file mode 100644 index 000000000000..94bff3c7f342 --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_file_operations.py @@ -0,0 +1,254 @@ +import os +import re +from pathlib import Path + +import pytest +from pytest_mock import MockerFixture + +import autogpt.commands.file_operations as file_ops +from autogpt.agents.agent import Agent +from autogpt.agents.utils.exceptions import DuplicateOperationError +from autogpt.config import Config +from autogpt.file_storage import FileStorage +from autogpt.memory.vector.memory_item import MemoryItem +from autogpt.memory.vector.utils import Embedding + + +@pytest.fixture() +def file_content(): + return "This is a test file.\n" + + +@pytest.fixture() +def mock_MemoryItem_from_text( + mocker: MockerFixture, mock_embedding: Embedding, config: Config +): + mocker.patch.object( + file_ops.MemoryItemFactory, + "from_text", + new=lambda content, source_type, config, metadata: MemoryItem( + raw_content=content, + summary=f"Summary of content '{content}'", + chunk_summaries=[f"Summary of content '{content}'"], + chunks=[content], + e_summary=mock_embedding, + e_chunks=[mock_embedding], + metadata=metadata | {"source_type": source_type}, + ), + ) + + +@pytest.fixture() +def test_file_name(): + return Path("test_file.txt") + + +@pytest.fixture +def test_file_path(test_file_name: Path, storage: FileStorage): + return storage.get_path(test_file_name) + + +@pytest.fixture() +def test_directory(storage: FileStorage): + return storage.get_path("test_directory") + + +@pytest.fixture() +def test_nested_file(storage: 
FileStorage): + return storage.get_path("nested/test_file.txt") + + +def test_file_operations_log(): + all_logs = ( + "File Operation Logger\n" + "write: path/to/file1.txt #checksum1\n" + "write: path/to/file2.txt #checksum2\n" + "write: path/to/file3.txt #checksum3\n" + "append: path/to/file2.txt #checksum4\n" + "delete: path/to/file3.txt\n" + ) + logs = all_logs.split("\n") + + expected = [ + ("write", "path/to/file1.txt", "checksum1"), + ("write", "path/to/file2.txt", "checksum2"), + ("write", "path/to/file3.txt", "checksum3"), + ("append", "path/to/file2.txt", "checksum4"), + ("delete", "path/to/file3.txt", None), + ] + assert list(file_ops.operations_from_log(logs)) == expected + + +def test_is_duplicate_operation(agent: Agent, mocker: MockerFixture): + # Prepare a fake state dictionary for the function to use + state = { + "path/to/file1.txt": "checksum1", + "path/to/file2.txt": "checksum2", + } + mocker.patch.object(file_ops, "file_operations_state", lambda _: state) + + # Test cases with write operations + assert ( + file_ops.is_duplicate_operation( + "write", Path("path/to/file1.txt"), agent, "checksum1" + ) + is True + ) + assert ( + file_ops.is_duplicate_operation( + "write", Path("path/to/file1.txt"), agent, "checksum2" + ) + is False + ) + assert ( + file_ops.is_duplicate_operation( + "write", Path("path/to/file3.txt"), agent, "checksum3" + ) + is False + ) + # Test cases with append operations + assert ( + file_ops.is_duplicate_operation( + "append", Path("path/to/file1.txt"), agent, "checksum1" + ) + is False + ) + # Test cases with delete operations + assert ( + file_ops.is_duplicate_operation("delete", Path("path/to/file1.txt"), agent) + is False + ) + assert ( + file_ops.is_duplicate_operation("delete", Path("path/to/file3.txt"), agent) + is True + ) + + +# Test logging a file operation +@pytest.mark.asyncio +async def test_log_operation(agent: Agent): + await file_ops.log_operation("log_test", Path("path/to/test"), agent=agent) + log_entry = 
agent.get_file_operation_lines()[-1] + assert "log_test: path/to/test" in log_entry + + +def test_text_checksum(file_content: str): + checksum = file_ops.text_checksum(file_content) + different_checksum = file_ops.text_checksum("other content") + assert re.match(r"^[a-fA-F0-9]+$", checksum) is not None + assert checksum != different_checksum + + +@pytest.mark.asyncio +async def test_log_operation_with_checksum(agent: Agent): + await file_ops.log_operation( + "log_test", Path("path/to/test"), agent=agent, checksum="ABCDEF" + ) + log_entry = agent.get_file_operation_lines()[-1] + assert "log_test: path/to/test #ABCDEF" in log_entry + + +@pytest.mark.asyncio +async def test_read_file( + mock_MemoryItem_from_text, + test_file_path: Path, + file_content, + agent: Agent, +): + await agent.workspace.write_file(test_file_path.name, file_content) + await file_ops.log_operation( + "write", Path(test_file_path.name), agent, file_ops.text_checksum(file_content) + ) + content = file_ops.read_file(test_file_path.name, agent=agent) + assert content.replace("\r", "") == file_content + + +def test_read_file_not_found(agent: Agent): + filename = "does_not_exist.txt" + with pytest.raises(FileNotFoundError): + file_ops.read_file(filename, agent=agent) + + +@pytest.mark.asyncio +async def test_write_to_file_relative_path(test_file_name: Path, agent: Agent): + new_content = "This is new content.\n" + await file_ops.write_to_file(test_file_name, new_content, agent=agent) + with open(agent.workspace.get_path(test_file_name), "r", encoding="utf-8") as f: + content = f.read() + assert content == new_content + + +@pytest.mark.asyncio +async def test_write_to_file_absolute_path(test_file_path: Path, agent: Agent): + new_content = "This is new content.\n" + await file_ops.write_to_file(test_file_path, new_content, agent=agent) + with open(test_file_path, "r", encoding="utf-8") as f: + content = f.read() + assert content == new_content + + +@pytest.mark.asyncio +async def 
test_write_file_logs_checksum(test_file_name: Path, agent: Agent): + new_content = "This is new content.\n" + new_checksum = file_ops.text_checksum(new_content) + await file_ops.write_to_file(test_file_name, new_content, agent=agent) + log_entry = agent.get_file_operation_lines()[-1] + assert log_entry == f"write: {test_file_name} #{new_checksum}" + + +@pytest.mark.asyncio +async def test_write_file_fails_if_content_exists(test_file_name: Path, agent: Agent): + new_content = "This is new content.\n" + await file_ops.log_operation( + "write", + test_file_name, + agent=agent, + checksum=file_ops.text_checksum(new_content), + ) + with pytest.raises(DuplicateOperationError): + await file_ops.write_to_file(test_file_name, new_content, agent=agent) + + +@pytest.mark.asyncio +async def test_write_file_succeeds_if_content_different( + test_file_path: Path, file_content: str, agent: Agent +): + await agent.workspace.write_file(test_file_path.name, file_content) + await file_ops.log_operation( + "write", Path(test_file_path.name), agent, file_ops.text_checksum(file_content) + ) + new_content = "This is different content.\n" + await file_ops.write_to_file(test_file_path.name, new_content, agent=agent) + + +@pytest.mark.asyncio +async def test_list_files(agent: Agent): + # Create files A and B + file_a_name = "file_a.txt" + file_b_name = "file_b.txt" + test_directory = Path("test_directory") + + await agent.workspace.write_file(file_a_name, "This is file A.") + await agent.workspace.write_file(file_b_name, "This is file B.") + + # Create a subdirectory and place a copy of file_a in it + agent.workspace.make_dir(test_directory) + await agent.workspace.write_file( + test_directory / file_a_name, "This is file A in the subdirectory." 
+ ) + + files = file_ops.list_folder(".", agent=agent) + assert file_a_name in files + assert file_b_name in files + assert os.path.join(test_directory, file_a_name) in files + + # Clean up + agent.workspace.delete_file(file_a_name) + agent.workspace.delete_file(file_b_name) + agent.workspace.delete_file(test_directory / file_a_name) + agent.workspace.delete_dir(test_directory) + + # Case 2: Search for a file that does not exist and make sure we don't throw + non_existent_file = "non_existent_file.txt" + files = file_ops.list_folder("", agent=agent) + assert non_existent_file not in files diff --git a/autogpts/autogpt/tests/unit/test_gcs_file_storage.py b/autogpts/autogpt/tests/unit/test_gcs_file_storage.py new file mode 100644 index 000000000000..a9dcd0103df7 --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_gcs_file_storage.py @@ -0,0 +1,200 @@ +import os +import uuid +from pathlib import Path + +import pytest +import pytest_asyncio +from google.auth.exceptions import GoogleAuthError +from google.cloud import storage +from google.cloud.exceptions import NotFound + +from autogpt.file_storage.gcs import GCSFileStorage, GCSFileStorageConfiguration + +try: + storage.Client() +except GoogleAuthError: + pytest.skip("Google Cloud Authentication not configured", allow_module_level=True) + + +@pytest.fixture(scope="module") +def gcs_bucket_name() -> str: + return f"test-bucket-{str(uuid.uuid4())[:8]}" + + +@pytest.fixture(scope="module") +def gcs_root() -> Path: + return Path("/workspaces/AutoGPT-some-unique-task-id") + + +@pytest.fixture(scope="module") +def gcs_storage_uninitialized(gcs_bucket_name: str, gcs_root: Path) -> GCSFileStorage: + os.environ["STORAGE_BUCKET"] = gcs_bucket_name + storage_config = GCSFileStorageConfiguration.from_env() + storage_config.root = gcs_root + storage = GCSFileStorage(storage_config) + yield storage # type: ignore + del os.environ["STORAGE_BUCKET"] + + +def test_initialize(gcs_bucket_name: str, gcs_storage_uninitialized: 
GCSFileStorage): + gcs = gcs_storage_uninitialized._gcs + + # test that the bucket doesn't exist yet + with pytest.raises(NotFound): + gcs.get_bucket(gcs_bucket_name) + + gcs_storage_uninitialized.initialize() + + # test that the bucket has been created + bucket = gcs.get_bucket(gcs_bucket_name) + + # clean up + bucket.delete(force=True) + + +@pytest.fixture(scope="module") +def gcs_storage(gcs_storage_uninitialized: GCSFileStorage) -> GCSFileStorage: + (gcs_storage := gcs_storage_uninitialized).initialize() + yield gcs_storage # type: ignore + + # Empty & delete the test bucket + gcs_storage._bucket.delete(force=True) + + +def test_workspace_bucket_name( + gcs_storage: GCSFileStorage, + gcs_bucket_name: str, +): + assert gcs_storage._bucket.name == gcs_bucket_name + + +NESTED_DIR = "existing/test/dir" +TEST_FILES: list[tuple[str | Path, str]] = [ + ("existing_test_file_1", "test content 1"), + ("existing_test_file_2.txt", "test content 2"), + (Path("existing_test_file_3"), "test content 3"), + (Path(f"{NESTED_DIR}/test_file_4"), "test content 4"), +] + + +@pytest_asyncio.fixture +async def gcs_storage_with_files(gcs_storage: GCSFileStorage) -> GCSFileStorage: + for file_name, file_content in TEST_FILES: + gcs_storage._bucket.blob( + str(gcs_storage.get_path(file_name)) + ).upload_from_string(file_content) + yield gcs_storage # type: ignore + + +@pytest.mark.asyncio +async def test_read_file(gcs_storage_with_files: GCSFileStorage): + for file_name, file_content in TEST_FILES: + content = gcs_storage_with_files.read_file(file_name) + assert content == file_content + + with pytest.raises(NotFound): + gcs_storage_with_files.read_file("non_existent_file") + + +def test_list_files(gcs_storage_with_files: GCSFileStorage): + # List at root level + assert ( + files := gcs_storage_with_files.list_files() + ) == gcs_storage_with_files.list_files() + assert len(files) > 0 + assert set(files) == set(Path(file_name) for file_name, _ in TEST_FILES) + + # List at nested path + 
assert ( + nested_files := gcs_storage_with_files.list_files(NESTED_DIR) + ) == gcs_storage_with_files.list_files(NESTED_DIR) + assert len(nested_files) > 0 + assert set(nested_files) == set( + p.relative_to(NESTED_DIR) + for file_name, _ in TEST_FILES + if (p := Path(file_name)).is_relative_to(NESTED_DIR) + ) + + +def test_list_folders(gcs_storage_with_files: GCSFileStorage): + # List recursive + folders = gcs_storage_with_files.list_folders(recursive=True) + assert len(folders) > 0 + assert set(folders) == { + Path("existing"), + Path("existing/test"), + Path("existing/test/dir"), + } + # List non-recursive + folders = gcs_storage_with_files.list_folders(recursive=False) + assert len(folders) > 0 + assert set(folders) == {Path("existing")} + + +@pytest.mark.asyncio +async def test_write_read_file(gcs_storage: GCSFileStorage): + await gcs_storage.write_file("test_file", "test_content") + assert gcs_storage.read_file("test_file") == "test_content" + + +@pytest.mark.asyncio +async def test_overwrite_file(gcs_storage_with_files: GCSFileStorage): + for file_name, _ in TEST_FILES: + await gcs_storage_with_files.write_file(file_name, "new content") + assert gcs_storage_with_files.read_file(file_name) == "new content" + + +def test_delete_file(gcs_storage_with_files: GCSFileStorage): + for file_to_delete, _ in TEST_FILES: + gcs_storage_with_files.delete_file(file_to_delete) + assert not gcs_storage_with_files.exists(file_to_delete) + + +def test_exists(gcs_storage_with_files: GCSFileStorage): + for file_name, _ in TEST_FILES: + assert gcs_storage_with_files.exists(file_name) + + assert not gcs_storage_with_files.exists("non_existent_file") + + +def test_rename_file(gcs_storage_with_files: GCSFileStorage): + for file_name, _ in TEST_FILES: + new_name = str(file_name) + "_renamed" + gcs_storage_with_files.rename(file_name, new_name) + assert gcs_storage_with_files.exists(new_name) + assert not gcs_storage_with_files.exists(file_name) + + +def 
test_rename_dir(gcs_storage_with_files: GCSFileStorage): + gcs_storage_with_files.rename(NESTED_DIR, "existing/test/dir_renamed") + assert gcs_storage_with_files.exists("existing/test/dir_renamed") + assert not gcs_storage_with_files.exists(NESTED_DIR) + + +def test_clone(gcs_storage_with_files: GCSFileStorage, gcs_root: Path): + cloned = gcs_storage_with_files.clone_with_subroot("existing/test") + assert cloned.root == gcs_root / Path("existing/test") + assert cloned._bucket.name == gcs_storage_with_files._bucket.name + assert cloned.exists("dir") + assert cloned.exists("dir/test_file_4") + + +@pytest.mark.asyncio +async def test_copy_file(storage: GCSFileStorage): + await storage.write_file("test_file.txt", "test content") + storage.copy("test_file.txt", "test_file_copy.txt") + storage.make_dir("dir") + storage.copy("test_file.txt", "dir/test_file_copy.txt") + assert storage.read_file("test_file_copy.txt") == "test content" + assert storage.read_file("dir/test_file_copy.txt") == "test content" + + +@pytest.mark.asyncio +async def test_copy_dir(storage: GCSFileStorage): + storage.make_dir("dir") + storage.make_dir("dir/sub_dir") + await storage.write_file("dir/test_file.txt", "test content") + await storage.write_file("dir/sub_dir/test_file.txt", "test content") + storage.copy("dir", "dir_copy") + assert storage.read_file("dir_copy/test_file.txt") == "test content" + assert storage.read_file("dir_copy/sub_dir/test_file.txt") == "test content" diff --git a/autogpts/autogpt/tests/unit/test_git_commands.py b/autogpts/autogpt/tests/unit/test_git_commands.py new file mode 100644 index 000000000000..31272fb931ee --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_git_commands.py @@ -0,0 +1,44 @@ +import pytest +from git.exc import GitCommandError +from git.repo.base import Repo + +from autogpt.agents.agent import Agent +from autogpt.agents.utils.exceptions import CommandExecutionError +from autogpt.commands.git_operations import clone_repository +from 
autogpt.file_storage.base import FileStorage + + +@pytest.fixture +def mock_clone_from(mocker): + return mocker.patch.object(Repo, "clone_from") + + +def test_clone_auto_gpt_repository(storage: FileStorage, mock_clone_from, agent: Agent): + mock_clone_from.return_value = None + + repo = "github.com/Significant-Gravitas/Auto-GPT.git" + scheme = "https://" + url = scheme + repo + clone_path = storage.get_path("auto-gpt-repo") + + expected_output = f"Cloned {url} to {clone_path}" + + clone_result = clone_repository(url=url, clone_path=clone_path, agent=agent) + + assert clone_result == expected_output + mock_clone_from.assert_called_once_with( + url=f"{scheme}{agent.legacy_config.github_username}:{agent.legacy_config.github_api_key}@{repo}", # noqa: E501 + to_path=clone_path, + ) + + +def test_clone_repository_error(storage: FileStorage, mock_clone_from, agent: Agent): + url = "https://github.com/this-repository/does-not-exist.git" + clone_path = storage.get_path("does-not-exist") + + mock_clone_from.side_effect = GitCommandError( + "clone", "fatal: repository not found", "" + ) + + with pytest.raises(CommandExecutionError): + clone_repository(url=url, clone_path=clone_path, agent=agent) diff --git a/autogpts/autogpt/tests/unit/test_json_utils.py b/autogpts/autogpt/tests/unit/test_json_utils.py new file mode 100644 index 000000000000..fdd1b0f08b60 --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_json_utils.py @@ -0,0 +1,93 @@ +import json + +import pytest + +from autogpt.core.utils.json_utils import json_loads + +_JSON_FIXABLE: list[tuple[str, str]] = [ + # Missing comma + ('{"name": "John Doe" "age": 30,}', '{"name": "John Doe", "age": 30}'), + ("[1, 2 3]", "[1, 2, 3]"), + # Trailing comma + ('{"name": "John Doe", "age": 30,}', '{"name": "John Doe", "age": 30}'), + ("[1, 2, 3,]", "[1, 2, 3]"), + # Extra comma in object + ('{"name": "John Doe",, "age": 30}', '{"name": "John Doe", "age": 30}'), + # Extra newlines + ('{"name": "John Doe",\n"age": 30}', '{"name": 
"John Doe", "age": 30}'), + ("[1, 2,\n3]", "[1, 2, 3]"), + # Missing closing brace or bracket + ('{"name": "John Doe", "age": 30', '{"name": "John Doe", "age": 30}'), + ("[1, 2, 3", "[1, 2, 3]"), + # Different numerals + ("[+1, ---2, .5, +-4.5, 123.]", "[1, -2, 0.5, -4.5, 123]"), + ('{"bin": 0b1001, "hex": 0x1A, "oct": 0o17}', '{"bin": 9, "hex": 26, "oct": 15}'), + # Broken array + ( + '[1, 2 3, "yes" true, false null, 25, {"obj": "var"}', + '[1, 2, 3, "yes", true, false, null, 25, {"obj": "var"}]', + ), + # Codeblock + ( + '```json\n{"name": "John Doe", "age": 30}\n```', + '{"name": "John Doe", "age": 30}', + ), + # Mutliple problems + ( + '{"name":"John Doe" "age": 30\n "empty": "","address": ' + "// random comment\n" + '{"city": "New York", "state": "NY"},' + '"skills": ["Python" "C++", "Java",""],', + '{"name": "John Doe", "age": 30, "empty": "", "address": ' + '{"city": "New York", "state": "NY"}, ' + '"skills": ["Python", "C++", "Java", ""]}', + ), + # All good + ( + '{"name": "John Doe", "age": 30, "address": ' + '{"city": "New York", "state": "NY"}, ' + '"skills": ["Python", "C++", "Java"]}', + '{"name": "John Doe", "age": 30, "address": ' + '{"city": "New York", "state": "NY"}, ' + '"skills": ["Python", "C++", "Java"]}', + ), + ("true", "true"), + ("false", "false"), + ("null", "null"), + ("123.5", "123.5"), + ('"Hello, World!"', '"Hello, World!"'), + ("{}", "{}"), + ("[]", "[]"), +] + +_JSON_UNFIXABLE: list[tuple[str, str]] = [ + # Broken booleans and null + ("[TRUE, False, NULL]", "[true, false, null]"), + # Missing values in array + ("[1, , 3]", "[1, 3]"), + # Leading zeros (are treated as octal) + ("[0023, 015]", "[23, 15]"), + # Missing quotes + ('{"name": John Doe}', '{"name": "John Doe"}'), + # Missing opening braces or bracket + ('"name": "John Doe"}', '{"name": "John Doe"}'), + ("1, 2, 3]", "[1, 2, 3]"), +] + + +@pytest.fixture(params=_JSON_FIXABLE) +def fixable_json(request: pytest.FixtureRequest) -> tuple[str, str]: + return request.param + + 
+@pytest.fixture(params=_JSON_UNFIXABLE) +def unfixable_json(request: pytest.FixtureRequest) -> tuple[str, str]: + return request.param + + +def test_json_loads_fixable(fixable_json: tuple[str, str]): + assert json_loads(fixable_json[0]) == json.loads(fixable_json[1]) + + +def test_json_loads_unfixable(unfixable_json: tuple[str, str]): + assert json_loads(unfixable_json[0]) != json.loads(unfixable_json[1]) diff --git a/autogpts/autogpt/tests/unit/test_local_file_storage.py b/autogpts/autogpt/tests/unit/test_local_file_storage.py new file mode 100644 index 000000000000..971a2e4213ba --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_local_file_storage.py @@ -0,0 +1,211 @@ +from pathlib import Path + +import pytest + +from autogpt.file_storage.local import FileStorageConfiguration, LocalFileStorage + +_ACCESSIBLE_PATHS = [ + Path("."), + Path("test_file.txt"), + Path("test_folder"), + Path("test_folder/test_file.txt"), + Path("test_folder/.."), + Path("test_folder/../test_file.txt"), + Path("test_folder/../test_folder"), + Path("test_folder/../test_folder/test_file.txt"), +] + +_INACCESSIBLE_PATHS = ( + [ + # Takes us out of the workspace + Path(".."), + Path("../test_file.txt"), + Path("../not_auto_gpt_workspace"), + Path("../not_auto_gpt_workspace/test_file.txt"), + Path("test_folder/../.."), + Path("test_folder/../../test_file.txt"), + Path("test_folder/../../not_auto_gpt_workspace"), + Path("test_folder/../../not_auto_gpt_workspace/test_file.txt"), + ] + + [ + # Contains null byte + Path("\0"), + Path("\0test_file.txt"), + Path("test_folder/\0"), + Path("test_folder/\0test_file.txt"), + ] + + [ + # Absolute paths + Path("/"), + Path("/test_file.txt"), + Path("/home"), + ] +) + +_TEST_FILES = [ + Path("test_file.txt"), + Path("dir/test_file.txt"), + Path("dir/test_file2.txt"), + Path("dir/sub_dir/test_file.txt"), +] + +_TEST_DIRS = [ + Path("dir"), + Path("dir/sub_dir"), +] + + +@pytest.fixture() +def storage_root(tmp_path): + return tmp_path / "data" + + 
+@pytest.fixture() +def storage(storage_root): + return LocalFileStorage( + FileStorageConfiguration(root=storage_root, restrict_to_root=True) + ) + + +@pytest.fixture() +def content(): + return "test content" + + +@pytest.fixture(params=_ACCESSIBLE_PATHS) +def accessible_path(request): + return request.param + + +@pytest.fixture(params=_INACCESSIBLE_PATHS) +def inaccessible_path(request): + return request.param + + +@pytest.fixture(params=_TEST_FILES) +def file_path(request): + return request.param + + +@pytest.mark.asyncio +async def test_open_file(file_path: Path, content: str, storage: LocalFileStorage): + if file_path.parent: + storage.make_dir(file_path.parent) + await storage.write_file(file_path, content) + file = storage.open_file(file_path) + assert file.read() == content + file.close() + storage.delete_file(file_path) + + +@pytest.mark.asyncio +async def test_write_read_file(content: str, storage: LocalFileStorage): + await storage.write_file("test_file.txt", content) + assert storage.read_file("test_file.txt") == content + + +@pytest.mark.asyncio +async def test_list_files(content: str, storage: LocalFileStorage): + storage.make_dir("dir") + storage.make_dir("dir/sub_dir") + await storage.write_file("test_file.txt", content) + await storage.write_file("dir/test_file.txt", content) + await storage.write_file("dir/test_file2.txt", content) + await storage.write_file("dir/sub_dir/test_file.txt", content) + files = storage.list_files() + assert Path("test_file.txt") in files + assert Path("dir/test_file.txt") in files + assert Path("dir/test_file2.txt") in files + assert Path("dir/sub_dir/test_file.txt") in files + storage.delete_file("test_file.txt") + storage.delete_file("dir/test_file.txt") + storage.delete_file("dir/test_file2.txt") + storage.delete_file("dir/sub_dir/test_file.txt") + storage.delete_dir("dir/sub_dir") + storage.delete_dir("dir") + + +@pytest.mark.asyncio +async def test_list_folders(content: str, storage: LocalFileStorage): + 
storage.make_dir("dir") + storage.make_dir("dir/sub_dir") + await storage.write_file("dir/test_file.txt", content) + await storage.write_file("dir/sub_dir/test_file.txt", content) + folders = storage.list_folders(recursive=False) + folders_recursive = storage.list_folders(recursive=True) + assert Path("dir") in folders + assert Path("dir/sub_dir") not in folders + assert Path("dir") in folders_recursive + assert Path("dir/sub_dir") in folders_recursive + storage.delete_file("dir/test_file.txt") + storage.delete_file("dir/sub_dir/test_file.txt") + storage.delete_dir("dir/sub_dir") + storage.delete_dir("dir") + + +@pytest.mark.asyncio +async def test_exists_delete_file( + file_path: Path, content: str, storage: LocalFileStorage +): + if file_path.parent: + storage.make_dir(file_path.parent) + await storage.write_file(file_path, content) + assert storage.exists(file_path) + storage.delete_file(file_path) + assert not storage.exists(file_path) + + +@pytest.fixture(params=_TEST_DIRS) +def test_make_delete_dir(request, storage: LocalFileStorage): + storage.make_dir(request) + assert storage.exists(request) + storage.delete_dir(request) + assert not storage.exists(request) + + +@pytest.mark.asyncio +async def test_rename(file_path: Path, content: str, storage: LocalFileStorage): + if file_path.parent: + storage.make_dir(file_path.parent) + await storage.write_file(file_path, content) + assert storage.exists(file_path) + storage.rename(file_path, Path(str(file_path) + "_renamed")) + assert not storage.exists(file_path) + assert storage.exists(Path(str(file_path) + "_renamed")) + + +def test_clone_with_subroot(storage: LocalFileStorage): + subroot = storage.clone_with_subroot("dir") + assert subroot.root == storage.root / "dir" + + +def test_get_path_accessible(accessible_path: Path, storage: LocalFileStorage): + full_path = storage.get_path(accessible_path) + assert full_path.is_absolute() + assert full_path.is_relative_to(storage.root) + + +def 
test_get_path_inaccessible(inaccessible_path: Path, storage: LocalFileStorage): + with pytest.raises(ValueError): + storage.get_path(inaccessible_path) + + +@pytest.mark.asyncio +async def test_copy_file(storage: LocalFileStorage): + await storage.write_file("test_file.txt", "test content") + storage.copy("test_file.txt", "test_file_copy.txt") + storage.make_dir("dir") + storage.copy("test_file.txt", "dir/test_file_copy.txt") + assert storage.read_file("test_file_copy.txt") == "test content" + assert storage.read_file("dir/test_file_copy.txt") == "test content" + + +@pytest.mark.asyncio +async def test_copy_dir(storage: LocalFileStorage): + storage.make_dir("dir") + storage.make_dir("dir/sub_dir") + await storage.write_file("dir/test_file.txt", "test content") + await storage.write_file("dir/sub_dir/test_file.txt", "test content") + storage.copy("dir", "dir_copy") + assert storage.read_file("dir_copy/test_file.txt") == "test content" + assert storage.read_file("dir_copy/sub_dir/test_file.txt") == "test content" diff --git a/autogpts/autogpt/tests/unit/test_logs.py b/autogpts/autogpt/tests/unit/test_logs.py new file mode 100644 index 000000000000..1ded61f31d50 --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_logs.py @@ -0,0 +1,36 @@ +import pytest + +from autogpt.logs.utils import remove_color_codes + + +@pytest.mark.parametrize( + "raw_text, clean_text", + [ + ( + "COMMAND = \x1b[36mbrowse_website\x1b[0m " + "ARGUMENTS = \x1b[36m{'url': 'https://www.google.com'," + " 'question': 'What is the capital of France?'}\x1b[0m", + "COMMAND = browse_website " + "ARGUMENTS = {'url': 'https://www.google.com'," + " 'question': 'What is the capital of France?'}", + ), + ( + "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': " + "'https://github.com/Significant-Gravitas/AutoGPT," + " https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}", + "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': " + 
"'https://github.com/Significant-Gravitas/AutoGPT," + " https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}", + ), + ("", ""), + ("hello", "hello"), + ("hello\x1B[31m world", "hello world"), + ("\x1B[36mHello,\x1B[32m World!", "Hello, World!"), + ( + "\x1B[1m\x1B[31mError:\x1B[0m\x1B[31m file not found", + "Error: file not found", + ), + ], +) +def test_remove_color_codes(raw_text, clean_text): + assert remove_color_codes(raw_text) == clean_text diff --git a/autogpts/autogpt/tests/unit/test_plugins.py b/autogpts/autogpt/tests/unit/test_plugins.py new file mode 100644 index 000000000000..f180d92bc6fe --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_plugins.py @@ -0,0 +1,125 @@ +import os + +import yaml + +from autogpt.config.config import Config +from autogpt.plugins import inspect_zip_for_modules, scan_plugins +from autogpt.plugins.plugin_config import PluginConfig +from autogpt.plugins.plugins_config import PluginsConfig + +PLUGINS_TEST_DIR = "tests/unit/data/test_plugins" +PLUGIN_TEST_ZIP_FILE = "Auto-GPT-Plugin-Test-master.zip" +PLUGIN_TEST_INIT_PY = "Auto-GPT-Plugin-Test-master/src/auto_gpt_vicuna/__init__.py" +PLUGIN_TEST_OPENAI = "https://weathergpt.vercel.app/" + + +def test_scan_plugins_openai(config: Config): + config.plugins_openai = [PLUGIN_TEST_OPENAI] + plugins_config = config.plugins_config + plugins_config.plugins[PLUGIN_TEST_OPENAI] = PluginConfig( + name=PLUGIN_TEST_OPENAI, enabled=True + ) + + # Test that the function returns the correct number of plugins + result = scan_plugins(config) + assert len(result) == 1 + + +def test_scan_plugins_generic(config: Config): + # Test that the function returns the correct number of plugins + plugins_config = config.plugins_config + plugins_config.plugins["auto_gpt_guanaco"] = PluginConfig( + name="auto_gpt_guanaco", enabled=True + ) + plugins_config.plugins["AutoGPTPVicuna"] = PluginConfig( + name="AutoGPTPVicuna", enabled=True + ) + result = scan_plugins(config) + plugin_class_names = 
[plugin.__class__.__name__ for plugin in result] + + assert len(result) == 2 + assert "AutoGPTGuanaco" in plugin_class_names + assert "AutoGPTPVicuna" in plugin_class_names + + +def test_scan_plugins_not_enabled(config: Config): + # Test that the function returns the correct number of plugins + plugins_config = config.plugins_config + plugins_config.plugins["auto_gpt_guanaco"] = PluginConfig( + name="auto_gpt_guanaco", enabled=True + ) + plugins_config.plugins["auto_gpt_vicuna"] = PluginConfig( + name="auto_gptp_vicuna", enabled=False + ) + result = scan_plugins(config) + plugin_class_names = [plugin.__class__.__name__ for plugin in result] + + assert len(result) == 1 + assert "AutoGPTGuanaco" in plugin_class_names + assert "AutoGPTPVicuna" not in plugin_class_names + + +def test_inspect_zip_for_modules(): + result = inspect_zip_for_modules(str(f"{PLUGINS_TEST_DIR}/{PLUGIN_TEST_ZIP_FILE}")) + assert result == [PLUGIN_TEST_INIT_PY] + + +def test_create_base_config(config: Config): + """ + Test the backwards-compatibility shim to convert old plugin allow/deny list + to a config file. 
+ """ + config.plugins_allowlist = ["a", "b"] + config.plugins_denylist = ["c", "d"] + + os.remove(config.plugins_config_file) + plugins_config = PluginsConfig.load_config( + plugins_config_file=config.plugins_config_file, + plugins_denylist=config.plugins_denylist, + plugins_allowlist=config.plugins_allowlist, + ) + + # Check the structure of the plugins config data + assert len(plugins_config.plugins) == 4 + assert plugins_config.get("a").enabled + assert plugins_config.get("b").enabled + assert not plugins_config.get("c").enabled + assert not plugins_config.get("d").enabled + + # Check the saved config file + with open(config.plugins_config_file, "r") as saved_config_file: + saved_config = yaml.load(saved_config_file, Loader=yaml.SafeLoader) + + assert saved_config == { + "a": {"enabled": True, "config": {}}, + "b": {"enabled": True, "config": {}}, + "c": {"enabled": False, "config": {}}, + "d": {"enabled": False, "config": {}}, + } + + +def test_load_config(config: Config): + """ + Test that the plugin config is loaded correctly from the plugins_config.yaml file. 
+ """ + # Create a test config and write it to disk + test_config = { + "a": {"enabled": True, "config": {"api_key": "1234"}}, + "b": {"enabled": False, "config": {}}, + } + with open(config.plugins_config_file, "w+") as f: + f.write(yaml.dump(test_config)) + + # Load the config from disk + plugins_config = PluginsConfig.load_config( + plugins_config_file=config.plugins_config_file, + plugins_denylist=config.plugins_denylist, + plugins_allowlist=config.plugins_allowlist, + ) + + # Check that the loaded config is equal to the test config + assert len(plugins_config.plugins) == 2 + assert plugins_config.get("a").enabled + assert plugins_config.get("a").config == {"api_key": "1234"} + assert not plugins_config.get("b").enabled + assert plugins_config.get("b").config == {} diff --git a/autogpts/autogpt/tests/unit/test_prompt_config.py b/autogpts/autogpt/tests/unit/test_prompt_config.py new file mode 100644 index 000000000000..ccadb191da48 --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_prompt_config.py @@ -0,0 +1,42 @@ +from autogpt.config.ai_directives import AIDirectives + +""" +Test cases for the PromptConfig class, which handles loads the Prompts configuration +settings from a YAML file. 
+""" + + +def test_prompt_config_loading(tmp_path): + """Test if the prompt configuration loads correctly""" + + yaml_content = """ +constraints: +- A test constraint +- Another test constraint +- A third test constraint +resources: +- A test resource +- Another test resource +- A third test resource +best_practices: +- A test best-practice +- Another test best-practice +- A third test best-practice +""" + prompt_settings_file = tmp_path / "test_prompt_settings.yaml" + prompt_settings_file.write_text(yaml_content) + + prompt_config = AIDirectives.from_file(prompt_settings_file) + + assert len(prompt_config.constraints) == 3 + assert prompt_config.constraints[0] == "A test constraint" + assert prompt_config.constraints[1] == "Another test constraint" + assert prompt_config.constraints[2] == "A third test constraint" + assert len(prompt_config.resources) == 3 + assert prompt_config.resources[0] == "A test resource" + assert prompt_config.resources[1] == "Another test resource" + assert prompt_config.resources[2] == "A third test resource" + assert len(prompt_config.best_practices) == 3 + assert prompt_config.best_practices[0] == "A test best-practice" + assert prompt_config.best_practices[1] == "Another test best-practice" + assert prompt_config.best_practices[2] == "A third test best-practice" diff --git a/autogpts/autogpt/tests/unit/test_s3_file_storage.py b/autogpts/autogpt/tests/unit/test_s3_file_storage.py new file mode 100644 index 000000000000..82bd5428c5c8 --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_s3_file_storage.py @@ -0,0 +1,195 @@ +import os +import uuid +from pathlib import Path + +import pytest +import pytest_asyncio +from botocore.exceptions import ClientError + +from autogpt.file_storage.s3 import S3FileStorage, S3FileStorageConfiguration + +if not (os.getenv("S3_ENDPOINT_URL") and os.getenv("AWS_ACCESS_KEY_ID")): + pytest.skip("S3 environment variables are not set", allow_module_level=True) + + +@pytest.fixture +def s3_bucket_name() -> str: 
+ return f"test-bucket-{str(uuid.uuid4())[:8]}" + + +@pytest.fixture +def s3_root() -> Path: + return Path("/workspaces/AutoGPT-some-unique-task-id") + + +@pytest.fixture +def s3_storage_uninitialized(s3_bucket_name: str, s3_root: Path) -> S3FileStorage: + os.environ["STORAGE_BUCKET"] = s3_bucket_name + storage_config = S3FileStorageConfiguration.from_env() + storage_config.root = s3_root + storage = S3FileStorage(storage_config) + yield storage # type: ignore + del os.environ["STORAGE_BUCKET"] + + +def test_initialize(s3_bucket_name: str, s3_storage_uninitialized: S3FileStorage): + s3 = s3_storage_uninitialized._s3 + + # test that the bucket doesn't exist yet + with pytest.raises(ClientError): + s3.meta.client.head_bucket(Bucket=s3_bucket_name) + + s3_storage_uninitialized.initialize() + + # test that the bucket has been created + s3.meta.client.head_bucket(Bucket=s3_bucket_name) + + +def test_workspace_bucket_name( + s3_storage: S3FileStorage, + s3_bucket_name: str, +): + assert s3_storage._bucket.name == s3_bucket_name + + +@pytest.fixture +def s3_storage(s3_storage_uninitialized: S3FileStorage) -> S3FileStorage: + (s3_storage := s3_storage_uninitialized).initialize() + yield s3_storage # type: ignore + + # Empty & delete the test bucket + s3_storage._bucket.objects.all().delete() + s3_storage._bucket.delete() + + +NESTED_DIR = "existing/test/dir" +TEST_FILES: list[tuple[str | Path, str]] = [ + ("existing_test_file_1", "test content 1"), + ("existing_test_file_2.txt", "test content 2"), + (Path("existing_test_file_3"), "test content 3"), + (Path(f"{NESTED_DIR}/test_file_4"), "test content 4"), +] + + +@pytest_asyncio.fixture +async def s3_storage_with_files(s3_storage: S3FileStorage) -> S3FileStorage: + for file_name, file_content in TEST_FILES: + s3_storage._bucket.Object(str(s3_storage.get_path(file_name))).put( + Body=file_content + ) + yield s3_storage # type: ignore + + +@pytest.mark.asyncio +async def test_read_file(s3_storage_with_files: S3FileStorage): + 
for file_name, file_content in TEST_FILES: + content = s3_storage_with_files.read_file(file_name) + assert content == file_content + + with pytest.raises(ClientError): + s3_storage_with_files.read_file("non_existent_file") + + +def test_list_files(s3_storage_with_files: S3FileStorage): + # List at root level + assert ( + files := s3_storage_with_files.list_files() + ) == s3_storage_with_files.list_files() + assert len(files) > 0 + assert set(files) == set(Path(file_name) for file_name, _ in TEST_FILES) + + # List at nested path + assert ( + nested_files := s3_storage_with_files.list_files(NESTED_DIR) + ) == s3_storage_with_files.list_files(NESTED_DIR) + assert len(nested_files) > 0 + assert set(nested_files) == set( + p.relative_to(NESTED_DIR) + for file_name, _ in TEST_FILES + if (p := Path(file_name)).is_relative_to(NESTED_DIR) + ) + + +def test_list_folders(s3_storage_with_files: S3FileStorage): + # List recursive + folders = s3_storage_with_files.list_folders(recursive=True) + assert len(folders) > 0 + assert set(folders) == { + Path("existing"), + Path("existing/test"), + Path("existing/test/dir"), + } + # List non-recursive + folders = s3_storage_with_files.list_folders(recursive=False) + assert len(folders) > 0 + assert set(folders) == {Path("existing")} + + +@pytest.mark.asyncio +async def test_write_read_file(s3_storage: S3FileStorage): + await s3_storage.write_file("test_file", "test_content") + assert s3_storage.read_file("test_file") == "test_content" + + +@pytest.mark.asyncio +async def test_overwrite_file(s3_storage_with_files: S3FileStorage): + for file_name, _ in TEST_FILES: + await s3_storage_with_files.write_file(file_name, "new content") + assert s3_storage_with_files.read_file(file_name) == "new content" + + +def test_delete_file(s3_storage_with_files: S3FileStorage): + for file_to_delete, _ in TEST_FILES: + s3_storage_with_files.delete_file(file_to_delete) + with pytest.raises(ClientError): + s3_storage_with_files.read_file(file_to_delete) + + 
+def test_exists(s3_storage_with_files: S3FileStorage): + for file_name, _ in TEST_FILES: + assert s3_storage_with_files.exists(file_name) + + assert not s3_storage_with_files.exists("non_existent_file") + + +def test_rename_file(s3_storage_with_files: S3FileStorage): + for file_name, _ in TEST_FILES: + new_name = str(file_name) + "_renamed" + s3_storage_with_files.rename(file_name, new_name) + assert s3_storage_with_files.exists(new_name) + assert not s3_storage_with_files.exists(file_name) + + +def test_rename_dir(s3_storage_with_files: S3FileStorage): + s3_storage_with_files.rename(NESTED_DIR, "existing/test/dir_renamed") + assert s3_storage_with_files.exists("existing/test/dir_renamed") + assert not s3_storage_with_files.exists(NESTED_DIR) + + +def test_clone(s3_storage_with_files: S3FileStorage, s3_root: Path): + cloned = s3_storage_with_files.clone_with_subroot("existing/test") + assert cloned.root == s3_root / Path("existing/test") + assert cloned._bucket.name == s3_storage_with_files._bucket.name + assert cloned.exists("dir") + assert cloned.exists("dir/test_file_4") + + +@pytest.mark.asyncio +async def test_copy_file(storage: S3FileStorage): + await storage.write_file("test_file.txt", "test content") + storage.copy("test_file.txt", "test_file_copy.txt") + storage.make_dir("dir") + storage.copy("test_file.txt", "dir/test_file_copy.txt") + assert storage.read_file("test_file_copy.txt") == "test content" + assert storage.read_file("dir/test_file_copy.txt") == "test content" + + +@pytest.mark.asyncio +async def test_copy_dir(storage: S3FileStorage): + storage.make_dir("dir") + storage.make_dir("dir/sub_dir") + await storage.write_file("dir/test_file.txt", "test content") + await storage.write_file("dir/sub_dir/test_file.txt", "test content") + storage.copy("dir", "dir_copy") + assert storage.read_file("dir_copy/test_file.txt") == "test content" + assert storage.read_file("dir_copy/sub_dir/test_file.txt") == "test content" diff --git 
a/autogpts/autogpt/tests/unit/test_spinner.py b/autogpts/autogpt/tests/unit/test_spinner.py new file mode 100644 index 000000000000..9f2cbac651c1 --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_spinner.py @@ -0,0 +1,35 @@ +import time + +from autogpt.app.spinner import Spinner + +ALMOST_DONE_MESSAGE = "Almost done..." +PLEASE_WAIT = "Please wait..." + + +def test_spinner_initializes_with_default_values(): + """Tests that the spinner initializes with default values.""" + with Spinner() as spinner: + assert spinner.message == "Loading..." + assert spinner.delay == 0.1 + + +def test_spinner_initializes_with_custom_values(): + """Tests that the spinner initializes with custom message and delay values.""" + with Spinner(message=PLEASE_WAIT, delay=0.2) as spinner: + assert spinner.message == PLEASE_WAIT + assert spinner.delay == 0.2 + + +# +def test_spinner_stops_spinning(): + """Tests that the spinner starts spinning and stops spinning without errors.""" + with Spinner() as spinner: + time.sleep(1) + assert not spinner.running + + +def test_spinner_can_be_used_as_context_manager(): + """Tests that the spinner can be used as a context manager.""" + with Spinner() as spinner: + assert spinner.running + assert not spinner.running diff --git a/autogpts/autogpt/tests/unit/test_text_file_parsers.py b/autogpts/autogpt/tests/unit/test_text_file_parsers.py new file mode 100644 index 000000000000..c13241580992 --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_text_file_parsers.py @@ -0,0 +1,170 @@ +import json +import logging +import os.path +import tempfile +from pathlib import Path +from xml.etree import ElementTree + +import docx +import pytest +import yaml +from bs4 import BeautifulSoup + +from autogpt.commands.file_operations_utils import ( + decode_textual_file, + is_file_binary_fn, +) + +logger = logging.getLogger(__name__) + +plain_text_str = "Hello, world!" 
+ + +def mock_text_file(): + with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as f: + f.write(plain_text_str) + return f.name + + +def mock_csv_file(): + with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".csv") as f: + f.write(plain_text_str) + return f.name + + +def mock_pdf_file(): + with tempfile.NamedTemporaryFile(mode="wb", delete=False, suffix=".pdf") as f: + # Create a new PDF and add a page with the text plain_text_str + # Write the PDF header + f.write(b"%PDF-1.7\n") + # Write the document catalog + f.write(b"1 0 obj\n") + f.write(b"<< /Type /Catalog /Pages 2 0 R >>\n") + f.write(b"endobj\n") + # Write the page object + f.write(b"2 0 obj\n") + f.write( + b"<< /Type /Page /Parent 1 0 R /Resources << /Font << /F1 3 0 R >> >> " + b"/MediaBox [0 0 612 792] /Contents 4 0 R >>\n" + ) + f.write(b"endobj\n") + # Write the font object + f.write(b"3 0 obj\n") + f.write( + b"<< /Type /Font /Subtype /Type1 /Name /F1 /BaseFont /Helvetica-Bold >>\n" + ) + f.write(b"endobj\n") + # Write the page contents object + f.write(b"4 0 obj\n") + f.write(b"<< /Length 25 >>\n") + f.write(b"stream\n") + f.write(b"BT\n/F1 12 Tf\n72 720 Td\n(Hello, world!) 
Tj\nET\n") + f.write(b"endstream\n") + f.write(b"endobj\n") + # Write the cross-reference table + f.write(b"xref\n") + f.write(b"0 5\n") + f.write(b"0000000000 65535 f \n") + f.write(b"0000000017 00000 n \n") + f.write(b"0000000073 00000 n \n") + f.write(b"0000000123 00000 n \n") + f.write(b"0000000271 00000 n \n") + f.write(b"trailer\n") + f.write(b"<< /Size 5 /Root 1 0 R >>\n") + f.write(b"startxref\n") + f.write(b"380\n") + f.write(b"%%EOF\n") + f.write(b"\x00") + return f.name + + +def mock_docx_file(): + with tempfile.NamedTemporaryFile(mode="wb", delete=False, suffix=".docx") as f: + document = docx.Document() + document.add_paragraph(plain_text_str) + document.save(f.name) + return f.name + + +def mock_json_file(): + with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".json") as f: + json.dump({"text": plain_text_str}, f) + return f.name + + +def mock_xml_file(): + root = ElementTree.Element("text") + root.text = plain_text_str + tree = ElementTree.ElementTree(root) + with tempfile.NamedTemporaryFile(mode="wb", delete=False, suffix=".xml") as f: + tree.write(f) + return f.name + + +def mock_yaml_file(): + with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".yaml") as f: + yaml.dump({"text": plain_text_str}, f) + return f.name + + +def mock_html_file(): + html = BeautifulSoup( + "" + "This is a test" + f"

{plain_text_str}

" + "", + "html.parser", + ) + with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".html") as f: + f.write(str(html)) + return f.name + + +def mock_md_file(): + with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".md") as f: + f.write(f"# {plain_text_str}!\n") + return f.name + + +def mock_latex_file(): + with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".tex") as f: + latex_str = ( + r"\documentclass{article}" + r"\begin{document}" + f"{plain_text_str}" + r"\end{document}" + ) + f.write(latex_str) + return f.name + + +respective_file_creation_functions = { + ".txt": mock_text_file, + ".csv": mock_csv_file, + ".pdf": mock_pdf_file, + ".docx": mock_docx_file, + ".json": mock_json_file, + ".xml": mock_xml_file, + ".yaml": mock_yaml_file, + ".html": mock_html_file, + ".md": mock_md_file, + ".tex": mock_latex_file, +} +binary_files_extensions = [".pdf", ".docx"] + + +@pytest.mark.parametrize( + "file_extension, c_file_creator", + respective_file_creation_functions.items(), +) +def test_parsers(file_extension, c_file_creator): + created_file_path = Path(c_file_creator()) + with open(created_file_path, "rb") as file: + loaded_text = decode_textual_file(file, os.path.splitext(file.name)[1], logger) + + assert plain_text_str in loaded_text + + should_be_binary = file_extension in binary_files_extensions + assert should_be_binary == is_file_binary_fn(file) + + created_file_path.unlink() # cleanup diff --git a/autogpts/autogpt/tests/unit/test_url_validation.py b/autogpts/autogpt/tests/unit/test_url_validation.py new file mode 100644 index 000000000000..cfecc48dd154 --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_url_validation.py @@ -0,0 +1,157 @@ +import pytest +from pytest import raises + +from autogpt.url_utils.validators import validate_url + + +@validate_url +def dummy_method(url): + return url + + +successful_test_data = ( + ("https://google.com/search?query=abc"), + ("https://google.com/search?query=abc&p=123"), + 
("http://google.com/"), + ("http://a.lot.of.domain.net/param1/param2"), +) + + +@pytest.mark.parametrize("url", successful_test_data) +def test_url_validation_succeeds(url): + assert dummy_method(url) == url + + +@pytest.mark.parametrize( + "url,expected_error", + [ + ("htt://example.com", "Invalid URL format"), + ("httppp://example.com", "Invalid URL format"), + (" https://example.com", "Invalid URL format"), + ("http://?query=q", "Missing Scheme or Network location"), + ], +) +def test_url_validation_fails_invalid_url(url, expected_error): + with raises(ValueError, match=expected_error): + dummy_method(url) + + +local_file = ( + ("file://localhost"), + ("file://localhost/home/reinier/secrets.txt"), + ("file:///home/reinier/secrets.txt"), + ("file:///C:/Users/Reinier/secrets.txt"), +) + + +@pytest.mark.parametrize("url", local_file) +def test_url_validation_fails_local_path(url): + with raises(ValueError): + dummy_method(url) + + +def test_happy_path_valid_url(): + """ + Test that the function successfully validates a valid URL with `http://` or + `https://` prefix. + """ + + @validate_url + def test_func(url): + return url + + assert test_func("https://www.google.com") == "https://www.google.com" + assert test_func("http://www.google.com") == "http://www.google.com" + + +def test_general_behavior_additional_path_parameters_query_string(): + """ + Test that the function successfully validates a valid URL with additional path, + parameters, and query string. + """ + + @validate_url + def test_func(url): + return url + + assert ( + test_func("https://www.google.com/search?q=python") + == "https://www.google.com/search?q=python" + ) + + +def test_edge_case_missing_scheme_or_network_location(): + """ + Test that the function raises a ValueError if the URL is missing scheme or + network location. 
+ """ + + @validate_url + def test_func(url): + return url + + with pytest.raises(ValueError): + test_func("www.google.com") + + +def test_edge_case_local_file_access(): + """Test that the function raises a ValueError if the URL has local file access""" + + @validate_url + def test_func(url): + return url + + with pytest.raises(ValueError): + test_func("file:///etc/passwd") + + +def test_general_behavior_sanitizes_url(): + """Test that the function sanitizes the URL by removing unnecessary components""" + + @validate_url + def test_func(url): + return url + + assert ( + test_func("https://www.google.com/search?q=python#top") + == "https://www.google.com/search?q=python" + ) + + +def test_general_behavior_invalid_url_format(): + """ + Test that the function raises a ValueError if the URL has an invalid format + (e.g. missing slashes) + """ + + @validate_url + def test_func(url): + return url + + with pytest.raises(ValueError): + test_func("https:www.google.com") + + +def test_url_with_special_chars(): + """ + Tests that the function can handle URLs that contain unusual but valid characters. + """ + url = "https://example.com/path%20with%20spaces" + assert dummy_method(url) == url + + +def test_extremely_long_url(): + """ + Tests that the function raises a ValueError if the URL is over 2000 characters. + """ + url = "http://example.com/" + "a" * 2000 + with raises(ValueError, match="URL is too long"): + dummy_method(url) + + +def test_internationalized_url(): + """ + Tests that the function can handle internationalized URLs with non-ASCII characters. 
+ """ + url = "http://例子.测试" + assert dummy_method(url) == url diff --git a/autogpts/autogpt/tests/unit/test_utils.py b/autogpts/autogpt/tests/unit/test_utils.py new file mode 100644 index 000000000000..9224f7212219 --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_utils.py @@ -0,0 +1,332 @@ +import json +import os +from pathlib import Path +from unittest.mock import patch + +import pytest +import requests +from git import InvalidGitRepositoryError + +import autogpt.app.utils +from autogpt.app.utils import ( + get_bulletin_from_web, + get_current_git_branch, + get_latest_bulletin, + set_env_config_value, +) +from autogpt.core.utils.json_utils import extract_dict_from_json +from autogpt.utils import validate_yaml_file +from tests.utils import skip_in_ci + + +@pytest.fixture +def valid_json_response() -> dict: + return { + "thoughts": { + "text": "My task is complete. I will use the 'task_complete' command " + "to shut down.", + "reasoning": "I will use the 'task_complete' command because it allows me " + "to shut down and signal that my task is complete.", + "plan": "I will use the 'task_complete' command with the reason " + "'Task complete: retrieved Tesla's revenue in 2022.' to shut down.", + "criticism": "I need to ensure that I have completed all necessary tasks " + "before shutting down.", + "speak": "All done!", + }, + "command": { + "name": "task_complete", + "args": {"reason": "Task complete: retrieved Tesla's revenue in 2022."}, + }, + } + + +@pytest.fixture +def invalid_json_response() -> dict: + return { + "thoughts": { + "text": "My task is complete. I will use the 'task_complete' command " + "to shut down.", + "reasoning": "I will use the 'task_complete' command because it allows me " + "to shut down and signal that my task is complete.", + "plan": "I will use the 'task_complete' command with the reason " + "'Task complete: retrieved Tesla's revenue in 2022.' 
to shut down.", + "criticism": "I need to ensure that I have completed all necessary tasks " + "before shutting down.", + "speak": "", + }, + "command": {"name": "", "args": {}}, + } + + +def test_validate_yaml_file_valid(): + with open("valid_test_file.yaml", "w") as f: + f.write("setting: value") + result, message = validate_yaml_file("valid_test_file.yaml") + os.remove("valid_test_file.yaml") + + assert result is True + assert "Successfully validated" in message + + +def test_validate_yaml_file_not_found(): + result, message = validate_yaml_file("non_existent_file.yaml") + + assert result is False + assert "wasn't found" in message + + +def test_validate_yaml_file_invalid(): + with open("invalid_test_file.yaml", "w") as f: + f.write( + "settings:\n" + " first_setting: value\n" + " second_setting: value\n" + " nested_setting: value\n" + " third_setting: value\n" + "unindented_setting: value" + ) + result, message = validate_yaml_file("invalid_test_file.yaml") + os.remove("invalid_test_file.yaml") + print(result) + print(message) + assert result is False + assert "There was an issue while trying to read" in message + + +@patch("requests.get") +def test_get_bulletin_from_web_success(mock_get): + expected_content = "Test bulletin from web" + + mock_get.return_value.status_code = 200 + mock_get.return_value.text = expected_content + bulletin = get_bulletin_from_web() + + assert expected_content in bulletin + mock_get.assert_called_with( + "https://raw.githubusercontent.com/Significant-Gravitas/AutoGPT/master/autogpts/autogpt/BULLETIN.md" # noqa: E501 + ) + + +@patch("requests.get") +def test_get_bulletin_from_web_failure(mock_get): + mock_get.return_value.status_code = 404 + bulletin = get_bulletin_from_web() + + assert bulletin == "" + + +@patch("requests.get") +def test_get_bulletin_from_web_exception(mock_get): + mock_get.side_effect = requests.exceptions.RequestException() + bulletin = get_bulletin_from_web() + + assert bulletin == "" + + +def 
test_get_latest_bulletin_no_file(): + if os.path.exists("data/CURRENT_BULLETIN.md"): + os.remove("data/CURRENT_BULLETIN.md") + + bulletin, is_new = get_latest_bulletin() + assert is_new + + +def test_get_latest_bulletin_with_file(): + expected_content = "Test bulletin" + with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + f.write(expected_content) + + with patch("autogpt.app.utils.get_bulletin_from_web", return_value=""): + bulletin, is_new = get_latest_bulletin() + assert expected_content in bulletin + assert is_new is False + + os.remove("data/CURRENT_BULLETIN.md") + + +def test_get_latest_bulletin_with_new_bulletin(): + with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + f.write("Old bulletin") + + expected_content = "New bulletin from web" + with patch( + "autogpt.app.utils.get_bulletin_from_web", return_value=expected_content + ): + bulletin, is_new = get_latest_bulletin() + assert "::NEW BULLETIN::" in bulletin + assert expected_content in bulletin + assert is_new + + os.remove("data/CURRENT_BULLETIN.md") + + +def test_get_latest_bulletin_new_bulletin_same_as_old_bulletin(): + expected_content = "Current bulletin" + with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f: + f.write(expected_content) + + with patch( + "autogpt.app.utils.get_bulletin_from_web", return_value=expected_content + ): + bulletin, is_new = get_latest_bulletin() + assert expected_content in bulletin + assert is_new is False + + os.remove("data/CURRENT_BULLETIN.md") + + +@skip_in_ci +def test_get_current_git_branch(): + branch_name = get_current_git_branch() + assert branch_name != "" + + +@patch("autogpt.app.utils.Repo") +def test_get_current_git_branch_success(mock_repo): + mock_repo.return_value.active_branch.name = "test-branch" + branch_name = get_current_git_branch() + + assert branch_name == "test-branch" + + +@patch("autogpt.app.utils.Repo") +def test_get_current_git_branch_failure(mock_repo): + mock_repo.side_effect = 
InvalidGitRepositoryError() + branch_name = get_current_git_branch() + + assert branch_name == "" + + +def test_extract_json_from_response(valid_json_response: dict): + emulated_response_from_openai = json.dumps(valid_json_response) + assert extract_dict_from_json(emulated_response_from_openai) == valid_json_response + + +def test_extract_json_from_response_wrapped_in_code_block(valid_json_response: dict): + emulated_response_from_openai = "```" + json.dumps(valid_json_response) + "```" + assert extract_dict_from_json(emulated_response_from_openai) == valid_json_response + + +def test_extract_json_from_response_wrapped_in_code_block_with_language( + valid_json_response: dict, +): + emulated_response_from_openai = "```json" + json.dumps(valid_json_response) + "```" + assert extract_dict_from_json(emulated_response_from_openai) == valid_json_response + + +def test_extract_json_from_response_json_contained_in_string(valid_json_response: dict): + emulated_response_from_openai = ( + "sentence1" + json.dumps(valid_json_response) + "sentence2" + ) + assert extract_dict_from_json(emulated_response_from_openai) == valid_json_response + + +@pytest.fixture +def mock_env_file_path(tmp_path): + return tmp_path / ".env" + + +env_file_initial_content = """ +# This is a comment +EXISTING_KEY=EXISTING_VALUE + +## This is also a comment +# DISABLED_KEY=DISABLED_VALUE + +# Another comment +UNUSED_KEY=UNUSED_VALUE +""" + + +@pytest.fixture +def mock_env_file(mock_env_file_path: Path, monkeypatch: pytest.MonkeyPatch): + mock_env_file_path.write_text(env_file_initial_content) + monkeypatch.setattr(autogpt.app.utils, "ENV_FILE_PATH", mock_env_file_path) + return mock_env_file_path + + +@pytest.fixture +def mock_environ(monkeypatch: pytest.MonkeyPatch): + env = {} + monkeypatch.setattr(os, "environ", env) + return env + + +def test_set_env_config_value_updates_existing_key( + mock_env_file: Path, mock_environ: dict +): + # Before updating, ensure the original content is as expected + with 
mock_env_file.open("r") as file: + assert file.readlines() == env_file_initial_content.splitlines(True) + + set_env_config_value("EXISTING_KEY", "NEW_VALUE") + with mock_env_file.open("r") as file: + content = file.readlines() + + # Ensure only the relevant line is altered + expected_content_lines = [ + "\n", + "# This is a comment\n", + "EXISTING_KEY=NEW_VALUE\n", # existing key + new value + "\n", + "## This is also a comment\n", + "# DISABLED_KEY=DISABLED_VALUE\n", + "\n", + "# Another comment\n", + "UNUSED_KEY=UNUSED_VALUE\n", + ] + assert content == expected_content_lines + assert mock_environ["EXISTING_KEY"] == "NEW_VALUE" + + +def test_set_env_config_value_uncomments_and_updates_disabled_key( + mock_env_file: Path, mock_environ: dict +): + # Before adding, ensure the original content is as expected + with mock_env_file.open("r") as file: + assert file.readlines() == env_file_initial_content.splitlines(True) + + set_env_config_value("DISABLED_KEY", "ENABLED_NEW_VALUE") + with mock_env_file.open("r") as file: + content = file.readlines() + + # Ensure only the relevant line is altered + expected_content_lines = [ + "\n", + "# This is a comment\n", + "EXISTING_KEY=EXISTING_VALUE\n", + "\n", + "## This is also a comment\n", + "DISABLED_KEY=ENABLED_NEW_VALUE\n", # disabled -> enabled + new value + "\n", + "# Another comment\n", + "UNUSED_KEY=UNUSED_VALUE\n", + ] + assert content == expected_content_lines + assert mock_environ["DISABLED_KEY"] == "ENABLED_NEW_VALUE" + + +def test_set_env_config_value_adds_new_key(mock_env_file: Path, mock_environ: dict): + # Before adding, ensure the original content is as expected + with mock_env_file.open("r") as file: + assert file.readlines() == env_file_initial_content.splitlines(True) + + set_env_config_value("NEW_KEY", "NEW_VALUE") + with mock_env_file.open("r") as file: + content = file.readlines() + + # Ensure the new key-value pair is added without altering the rest + expected_content_lines = [ + "\n", + "# This is a 
comment\n", + "EXISTING_KEY=EXISTING_VALUE\n", + "\n", + "## This is also a comment\n", + "# DISABLED_KEY=DISABLED_VALUE\n", + "\n", + "# Another comment\n", + "UNUSED_KEY=UNUSED_VALUE\n", + "NEW_KEY=NEW_VALUE\n", # New key-value pair added at the end + ] + assert content == expected_content_lines + assert mock_environ["NEW_KEY"] == "NEW_VALUE" diff --git a/autogpts/autogpt/tests/unit/test_web_search.py b/autogpts/autogpt/tests/unit/test_web_search.py new file mode 100644 index 000000000000..c4aba67f1108 --- /dev/null +++ b/autogpts/autogpt/tests/unit/test_web_search.py @@ -0,0 +1,136 @@ +import json + +import pytest +from googleapiclient.errors import HttpError + +from autogpt.agents.agent import Agent +from autogpt.agents.utils.exceptions import ConfigurationError +from autogpt.commands.web_search import google, safe_google_results, web_search + + +@pytest.mark.parametrize( + "query, expected_output", + [("test", "test"), (["test1", "test2"], '["test1", "test2"]')], +) +def test_safe_google_results(query, expected_output): + result = safe_google_results(query) + assert isinstance(result, str) + assert result == expected_output + + +def test_safe_google_results_invalid_input(): + with pytest.raises(AttributeError): + safe_google_results(123) + + +@pytest.mark.parametrize( + "query, num_results, expected_output_parts, return_value", + [ + ( + "test", + 1, + ("Result 1", "https://example.com/result1"), + [{"title": "Result 1", "href": "https://example.com/result1"}], + ), + ("", 1, (), []), + ("no results", 1, (), []), + ], +) +def test_google_search( + query, num_results, expected_output_parts, return_value, mocker, agent: Agent +): + mock_ddg = mocker.Mock() + mock_ddg.return_value = return_value + + mocker.patch("autogpt.commands.web_search.DDGS.text", mock_ddg) + actual_output = web_search(query, agent=agent, num_results=num_results) + for o in expected_output_parts: + assert o in actual_output + + +@pytest.fixture +def mock_googleapiclient(mocker): + mock_build 
= mocker.patch("googleapiclient.discovery.build") + mock_service = mocker.Mock() + mock_build.return_value = mock_service + return mock_service.cse().list().execute().get + + +@pytest.mark.parametrize( + "query, num_results, search_results, expected_output", + [ + ( + "test", + 3, + [ + {"link": "http://example.com/result1"}, + {"link": "http://example.com/result2"}, + {"link": "http://example.com/result3"}, + ], + [ + "http://example.com/result1", + "http://example.com/result2", + "http://example.com/result3", + ], + ), + ("", 3, [], []), + ], +) +def test_google_official_search( + query, + num_results, + expected_output, + search_results, + mock_googleapiclient, + agent: Agent, +): + mock_googleapiclient.return_value = search_results + actual_output = google(query, agent=agent, num_results=num_results) + assert actual_output == safe_google_results(expected_output) + + +@pytest.mark.parametrize( + "query, num_results, expected_error_type, http_code, error_msg", + [ + ( + "invalid query", + 3, + HttpError, + 400, + "Invalid Value", + ), + ( + "invalid API key", + 3, + ConfigurationError, + 403, + "invalid API key", + ), + ], +) +def test_google_official_search_errors( + query, + num_results, + expected_error_type, + mock_googleapiclient, + http_code, + error_msg, + agent: Agent, +): + class resp: + def __init__(self, _status, _reason): + self.status = _status + self.reason = _reason + + response_content = { + "error": {"code": http_code, "message": error_msg, "reason": "backendError"} + } + error = HttpError( + resp=resp(http_code, error_msg), + content=str.encode(json.dumps(response_content)), + uri="https://www.googleapis.com/customsearch/v1?q=invalid+query&cx", + ) + + mock_googleapiclient.side_effect = error + with pytest.raises(expected_error_type): + google(query, agent=agent, num_results=num_results) diff --git a/autogpts/autogpt/tests/utils.py b/autogpts/autogpt/tests/utils.py new file mode 100644 index 000000000000..d039bb898c36 --- /dev/null +++ 
b/autogpts/autogpt/tests/utils.py @@ -0,0 +1,10 @@ +import os + +import pytest + + +def skip_in_ci(test_function): + return pytest.mark.skipif( + os.environ.get("CI") == "true", + reason="This test doesn't work on GitHub Actions.", + )(test_function) diff --git a/autogpts/autogpt/tests/vcr/__init__.py b/autogpts/autogpt/tests/vcr/__init__.py new file mode 100644 index 000000000000..8d477cfe28dc --- /dev/null +++ b/autogpts/autogpt/tests/vcr/__init__.py @@ -0,0 +1,77 @@ +import logging +import os +from hashlib import sha256 + +import pytest +from openai import OpenAI +from openai._models import FinalRequestOptions +from openai._types import Omit +from openai._utils import is_given +from pytest_mock import MockerFixture + +from .vcr_filter import ( + before_record_request, + before_record_response, + freeze_request_body, +) + +DEFAULT_RECORD_MODE = "new_episodes" +BASE_VCR_CONFIG = { + "before_record_request": before_record_request, + "before_record_response": before_record_response, + "match_on": ["method", "headers"], +} + + +@pytest.fixture(scope="session") +def vcr_config(get_base_vcr_config): + return get_base_vcr_config + + +@pytest.fixture(scope="session") +def get_base_vcr_config(request): + record_mode = request.config.getoption("--record-mode", default="new_episodes") + config = BASE_VCR_CONFIG + + if record_mode is None: + config["record_mode"] = DEFAULT_RECORD_MODE + + return config + + +@pytest.fixture() +def vcr_cassette_dir(request): + test_name = os.path.splitext(request.node.name)[0] + return os.path.join("tests/vcr_cassettes", test_name) + + +@pytest.fixture +def cached_openai_client(mocker: MockerFixture) -> OpenAI: + client = OpenAI() + _prepare_options = client._prepare_options + + def _patched_prepare_options(self, options: FinalRequestOptions): + _prepare_options(options) + + headers: dict[str, str | Omit] = ( + {**options.headers} if is_given(options.headers) else {} + ) + options.headers = headers + data: dict = options.json_data + + 
logging.getLogger("cached_openai_client").debug( + f"Outgoing API request: {headers}\n{data if data else None}" + ) + + # Add hash header for cheap & fast matching on cassette playback + headers["X-Content-Hash"] = sha256( + freeze_request_body(data), usedforsecurity=False + ).hexdigest() + + mocker.patch.object( + client, + "_prepare_options", + new=_patched_prepare_options, + ) + + return client diff --git a/autogpts/autogpt/tests/vcr/vcr_filter.py b/autogpts/autogpt/tests/vcr/vcr_filter.py new file mode 100644 index 000000000000..81c269fc50d1 --- /dev/null +++ b/autogpts/autogpt/tests/vcr/vcr_filter.py @@ -0,0 +1,110 @@ +import contextlib +import json +import re +from io import BytesIO +from typing import Any + +from vcr.request import Request + +HOSTNAMES_TO_CACHE: list[str] = [ + "api.openai.com", + "localhost:50337", + "duckduckgo.com", +] + +IGNORE_REQUEST_HEADERS: set[str | re.Pattern] = { + "Authorization", + "Cookie", + "OpenAI-Organization", + "X-OpenAI-Client-User-Agent", + "User-Agent", + re.compile(r"X-Stainless-[\w\-]+", re.IGNORECASE), +} + +LLM_MESSAGE_REPLACEMENTS: list[dict[str, str]] = [ + { + "regex": r"\w{3} \w{3} {1,2}\d{1,2} \d{2}:\d{2}:\d{2} \d{4}", + "replacement": "Tue Jan 1 00:00:00 2000", + }, + { + "regex": r"]*>", + "replacement": "", + }, +] + +OPENAI_URL = "api.openai.com" + + +def before_record_request(request: Request) -> Request | None: + if not should_cache_request(request): + return None + + request = filter_request_headers(request) + request = freeze_request(request) + return request + + +def should_cache_request(request: Request) -> bool: + return any(hostname in request.url for hostname in HOSTNAMES_TO_CACHE) + + +def filter_request_headers(request: Request) -> Request: + for header_name in list(request.headers): + if any( + ( + (type(ignore) is str and ignore.lower() == header_name.lower()) + or (isinstance(ignore, re.Pattern) and ignore.match(header_name)) + ) + for ignore in IGNORE_REQUEST_HEADERS + ): + del 
request.headers[header_name] + return request + + +def freeze_request(request: Request) -> Request: + if not request or not request.body: + return request + + with contextlib.suppress(ValueError): + request.body = freeze_request_body( + json.loads( + request.body.getvalue() + if isinstance(request.body, BytesIO) + else request.body + ) + ) + + return request + + +def freeze_request_body(body: dict) -> bytes: + """Remove any dynamic items from the request body""" + + if "messages" not in body: + return json.dumps(body, sort_keys=True).encode() + + if "max_tokens" in body: + del body["max_tokens"] + + for message in body["messages"]: + if "content" in message and "role" in message: + if message["role"] == "system": + message["content"] = replace_message_content( + message["content"], LLM_MESSAGE_REPLACEMENTS + ) + + return json.dumps(body, sort_keys=True).encode() + + +def replace_message_content(content: str, replacements: list[dict[str, str]]) -> str: + for replacement in replacements: + pattern = re.compile(replacement["regex"]) + content = pattern.sub(replacement["replacement"], content) + + return content + + +def before_record_response(response: dict[str, Any]) -> dict[str, Any]: + if "Transfer-Encoding" in response["headers"]: + del response["headers"]["Transfer-Encoding"] + return response From dcf286461db772191b2ead855ba4c79dd96e3125 Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Thu, 24 Oct 2024 11:58:33 -0500 Subject: [PATCH 12/14] Update subproject commit hash with dirty flag --- classic/frontend/lib/main.dart | 2 +- classic/frontend/lib/views/chat/agent_message_tile.dart | 3 +-- classic/frontend/lib/views/main_layout.dart | 2 +- .../lib/views/task_queue/leaderboard_submission_dialog.dart | 1 - 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/classic/frontend/lib/main.dart b/classic/frontend/lib/main.dart index bdb86047267c..f9016c026cab 100644 --- a/classic/frontend/lib/main.dart +++ 
b/classic/frontend/lib/main.dart @@ -69,7 +69,7 @@ void main() async { SettingsViewModel(restApiUtility, prefsService), ), ], - child: MyApp(), + child: const MyApp(), ), ); } diff --git a/classic/frontend/lib/views/chat/agent_message_tile.dart b/classic/frontend/lib/views/chat/agent_message_tile.dart index dedcc7971a7c..4e9b88c970f6 100644 --- a/classic/frontend/lib/views/chat/agent_message_tile.dart +++ b/classic/frontend/lib/views/chat/agent_message_tile.dart @@ -30,8 +30,7 @@ class _AgentMessageTileState extends State { bool containsMarkdown(String text) { // Regular expression to detect Markdown patterns like headers, bold, links, etc. final RegExp markdownPattern = RegExp( - r'(?:\*\*|__).*?(?:\*\*|__)|' r'(?:\*|_).*?(?:\*|_)|' + // Italic - r'\[.*?\]\(.*?\)|' + // Links + r'(?:\*\*|__).*?(?:\*\*|__)|' r'(?:\*|_).*?(?:\*|_)|' r'\[.*?\]\(.*?\)|' + // Links r'!\[.*?\]\(.*?\)|' + // Images r'#{1,6}.*|' + // Headers r'```.*?```', // Fenced code blocks diff --git a/classic/frontend/lib/views/main_layout.dart b/classic/frontend/lib/views/main_layout.dart index bbf247897287..6e2b960baefc 100644 --- a/classic/frontend/lib/views/main_layout.dart +++ b/classic/frontend/lib/views/main_layout.dart @@ -105,7 +105,7 @@ class MainLayout extends StatelessWidget { if (skillTreeViewModel.selectedNode != null) SizedBox( width: testQueueViewWidth, - child: TaskQueueView()), + child: const TaskQueueView()), SizedBox( width: chatViewWidth, child: ChatView(viewModel: chatViewModel)), diff --git a/classic/frontend/lib/views/task_queue/leaderboard_submission_dialog.dart b/classic/frontend/lib/views/task_queue/leaderboard_submission_dialog.dart index 0dd28eab99a1..ad7826bb3534 100644 --- a/classic/frontend/lib/views/task_queue/leaderboard_submission_dialog.dart +++ b/classic/frontend/lib/views/task_queue/leaderboard_submission_dialog.dart @@ -2,7 +2,6 @@ import 'package:auto_gpt_flutter_client/constants/app_colors.dart'; import 'package:auto_gpt_flutter_client/utils/uri_utility.dart'; 
import 'package:auto_gpt_flutter_client/viewmodels/task_queue_viewmodel.dart'; import 'package:flutter/material.dart'; -import 'package:shared_preferences/shared_preferences.dart'; class LeaderboardSubmissionDialog extends StatefulWidget { final Function(String, String, String)? onSubmit; From d1f0db20abf8f972225c4139bdac65aacb6746b3 Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Thu, 24 Oct 2024 12:02:13 -0500 Subject: [PATCH 13/14] Update subproject commit hash with dirty flag --- autogpt_platform/frontend/flutter | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt_platform/frontend/flutter b/autogpt_platform/frontend/flutter index f67fa22d6383..c0af605f975f 160000 --- a/autogpt_platform/frontend/flutter +++ b/autogpt_platform/frontend/flutter @@ -1 +1 @@ -Subproject commit f67fa22d63833d855b75b80a7948fb6a0688385d +Subproject commit c0af605f975f8bd44e0ae0c86c712f3ecb5f54f1 From 4cc276345cd85d9012dbcac694da93fb10ece221 Mon Sep 17 00:00:00 2001 From: Dman0808 <168228320+Dman0808@users.noreply.github.com> Date: Mon, 28 Oct 2024 16:32:57 -0500 Subject: [PATCH 14/14] Update workspace settings --- .vscode/all-projects.code-workspace | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.vscode/all-projects.code-workspace b/.vscode/all-projects.code-workspace index ff575ab3ca26..261bd7bb7bf8 100644 --- a/.vscode/all-projects.code-workspace +++ b/.vscode/all-projects.code-workspace @@ -47,7 +47,8 @@ } ], "settings": { - "python.analysis.typeCheckingMode": "basic" + "python.analysis.typeCheckingMode": "basic", + "dart.showTodos": false }, "extensions": { "recommendations": [