diff --git a/draft_notebook.ipynb b/draft_notebook.ipynb new file mode 100644 index 00000000..077538bf --- /dev/null +++ b/draft_notebook.ipynb @@ -0,0 +1,638 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from llama_index.core import PromptTemplate\n", + "\n", + "world_model_examples = \"\"\"\n", + "Objective: What's the name of our Lead Developer ?\n", + "Previous instructions: [NONE]\n", + "Last engine: [NONE]\n", + "Current relevant state: \n", + "- screenshot: [SCREENSHOT]\n", + "- page_content_summary: The organization is comprised of several teams, each with a distinct focus. The Software Development Team creates custom applications, while the IT Support Team ensures the technology infrastructure runs smoothly. The Quality Assurance Team rigorously tests products to maintain high standards of quality. The Research and Development Team explores new technologies and methodologies, and the Systems Engineering Team designs and maintains complex systems that power the organization's services.\n", + "\n", + "Thoughts:\n", + "- The screenshot shows a Notion page.\n", + "- The page content summary provides an overview of the organization's structure and the roles of different teams.\n", + "- The \"Our Departments\" section lists various departments, including \"Engineering Department.\"\n", + "- To find the name of the Lead Developer, I should look for information under the \"Engineering Department\" section.\n", + "- The \"Engineering Department\" section is likely to contain details about the team, including the Lead Developer.\n", + "Next engine: Navigation Engine\n", + "Instruction: Locate the link titled \"Engineering Department\" and click on it.\n", + "-----\n", + "Objective: Go to the first issue you can find\n", + "Previous instructions: \n", + "- Click on 'Issues' with the number '28' next to it.\n", + "Last engine: Navigation Engine\n", + "Current relevant state: \n", + "- screenshot: [SCREENSHOT]\n", + "- page_content_summary: The provided text appears to be a list of issues or tasks related to the LaVague project. The issues are categorized into different types, including bugs, enhancements, and documentation improvements. The issues range from debugging guides to documentation improvements, and even feature requests. Some issues are labeled as \"help wanted,\" indicating that they require additional attention or expertise. 
The text also includes information about the status of the issues, such as whether they are open, in progress, or have been closed\n", + "\n", + "Thoughts:\n", + "- The current screenshot shows the issues page of the repository 'lavague-ai/LaVague'.\n", + "- The summary indicates that the page contains a list of issues related to the LaVague project.\n", + "- The objective is to go to the first issue.\n", + "- The first issue is highlighted in the list of issues.\n", + "Next engine: Navigation Engine\n", + "Instruction: Click on the issue, with title 'Build and share place where people can suggest their use cases and results #225'\n", + "-----\n", + "Objective: Find When Llama 3 was released\n", + "Previous instructions:\n", + "- Click on 'meta-llama/Meta-Llama-3-8B'\n", + "Last engine: Navigation Engine\n", + "Current relevant state: \n", + "- screenshot: [SCREENSHOT]\n", + "- page_content_summary: The provided text discusses the development and release of Llama 3, a large language model designed for various applications. The model's development prioritizes responsible AI development, limiting misuse and harm, and supporting the open-source community. \n", + "The model's safety features include safeguards such as Meta Llama Guard 2 and Code Shield, which drastically reduce residual risks while maintaining helpfulness. The model's safety is ensured through extensive testing, adversarial evaluations, and implemented safety mitigations. \n", + "The text also highlights the importance of responsible use, emphasizing the need for developers to implement safety best practices and tailor safeguards to their specific use case and audience. The model's refusals to benign prompts have been improved, reducing the likelihood of falsely refusing to answer prompts. \n", + "The release of Llama 3 followed a rigorous process, including extra measures against misuse and critical risks. The model's safety has been assessed in various areas, including CBRNE threats, cyber security, and child safety. The company is actively contributing to open consortiums, promoting safety standardization and transparency in the development of generative AI.\n", + "\n", + "Thoughts:\n", + "- The current screenshot shows the model page for 'meta-llama/Meta-Llama-3-8B' on Hugging Face.\n", + "- The page content summary provides detailed information about the development and release of Llama 3, emphasizing its safety features and responsible AI development.\n", + "- Hugging Face, is a hub for AI models and datasets, where users can explore and interact with a variety of AI models.\n", + "- I am therefore on the right page to find information about the release date of 'Meta-Llama-3-8B'.\n", + "- To find the release date, I need to locate the relevant information in the content of the page.\n", + "- Therefore the best next step is to use the Python Engine to extract the release date from the content of the page.\n", + "Next engine: Python Engine\n", + "Instruction: Extract the release date of 'Meta-Llama-3-8B' from the textual content of the page.\n", + "-----\n", + "Objective: Provide the code to get started with Gemini API\n", + "Previous instructions:\n", + "- Click on 'Read API docs'\n", + "- Click on 'Gemini API quickstart' on the menu\n", + "Last engine: Navigation Engine\n", + "Current relevant state:\n", + "- screenshot: [SCREENSHOT]\n", + "- page_content_summary: The text provides an introduction to the Gemini API, a family of Google's most capable AI models. 
It offers a comprehensive guide to help users get started with the API. The fastest way to start using Gemini is through Google AI Studio, a web-based tool that allows users to prototype, run prompts, and get started with the API. The text also provides a quickstart guide to help users begin. Additionally, it emphasizes the importance of using LLMs safely and responsibly, directing users to safety settings and safety guidance documentation. The text also mentions that the Gemini API and Google AI Studio are available in over 180 countries. Finally, it provides further reading resources for users to learn more about the models provided by the Gemini API.\n", + "\n", + "Thoughts:\n", + "- The current screenshot shows the Getting Started documentation page for the Gemini API.\n", + "- The page content summary provides an overview of the Gemini API, highlighting its capabilities and the code to get started.\n", + "- I am therefore on the right page to find the code to get started with the Gemini API.\n", + "- The next step is to provide the code to get started with the Gemini API.\n", + "- Therefore I need to use the Python Engine to extract the code to get started with the Gemini API from this page.\n", + "Next engine: Python Engine\n", + "Instruction: Extract the code to get started with the Gemini API from the content of the page.\n", + "\"\"\"\n", + "\n", + "WORLD_MODEL_PROMPT_TEMPLATE = PromptTemplate(\"\"\"\n", + "You are an AI system specialized in high level reasoning. Your goal is to generate instructions for other specialized AIs to perform web actions to reach objectives given by humans.\n", + "Your inputs are:\n", + "- objective ('str'): a high level description of the goal to achieve.\n", + "- previous_instructions ('str'): a list of previous steps taken to reach the objective.\n", + "- last_engine ('str'): the engine used in the previous step.\n", + "- current_relevant_state ('obj'): the state of the environment to use to perform the next step. This can be a screenshot if the previous engine was a NavigationEngine, or a description of variables if the previous engine was a PythonEngine.\n", + "\n", + "Your outputs are:\n", + "- thoughts ('str'): a list of thoughts in bullet points detailing your reasoning.\n", + "- next_engine ('str'): the engine to use for the next step.\n", + "- instruction ('str'): the instruction for the engine to perform the next step.\n", + "\n", + "Here are the engines at your disposal:\n", + "- Python Engine: This engine is used when the task requires computation using the current state of the agent. \n", + "It does not impact the outside world and does not navigate.\n", + "- Navigation Engine: This engine is used when the next step of the task requires further navigation to reach the goal. \n", + "For instance, it can be used to click on a link or to fill a form on a webpage. This engine is heavy and will do complex processing of the current HTML to decide which element to interact with.\n", + "- Navigation Controls: This is a simpler engine for commands that do not require reasoning, such as scrolling down or waiting.\n", + "\n", + "The instruction should be as detailed as possible and only contain the next step. \n", + "If the objective is already achieved in the screenshot, provide the next engine and instruction 'STOP'.\n", + "\n", + "Here are previous examples:\n", + "{examples}\n", + "\n", + "Here is the next objective:\n", + "Objective: {objective}\n", + "Previous instructions: \n", + "{previous_instructions}\n", + "Last engine: {last_engine}\n", + "Current relevant state: {current_state}\n", + "\n", + "Thoughts:\n", + "\"\"\"\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\n- IO Engine: This engine is used when the task requires interacting with the outside world, such as at the end of an agent run, to send the result of the agent to the user.\\nFor instance, it can be used to send an email with the result of the agent, or pull data from a database to update the agent state.\\n'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\"\"\n", + "- IO Engine: This engine is used when the task requires interacting with the outside world, such as at the end of an agent run, to send the result of the agent to the user.\n", + "For instance, it can be used to send an email with the result of the agent, or pull data from a database to update the agent state.\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/dhuynh/miniconda3/envs/myenv/lib/python3.10/site-packages/lavague/core/__init__.py:18: UserWarning: \u001b[93mTelemetry is turned on. To turn off telemetry, set your TELEMETRY_VAR to 'NONE'\u001b[0m\n", + " warnings.warn(warning_message, UserWarning)\n", + "/home/dhuynh/miniconda3/envs/myenv/lib/python3.10/site-packages/lavague/core/__init__.py:24: UserWarning: \u001b[93mSecurity warning: This package executes LLM-generated code. 
Consider using this package in a sandboxed environment.\u001b[0m\n", + " warnings.warn(warning_message, UserWarning)\n" + ] + } + ], + "source": [ + "from lavague.contexts.gemini import GeminiContext\n", + "gemini_context = GeminiContext()\n", + "\n", + "from lavague.contexts.openai import OpenaiContext\n", + "context = OpenaiContext(llm=\"gpt-4o\",embedding=\"text-embedding-3-small\")\n", + "\n", + "context.llm = gemini_context.llm\n", + "\n", + "mm_llm = context.mm_llm\n", + "prompt_template = WORLD_MODEL_PROMPT_TEMPLATE.partial_format(\n", + " examples=world_model_examples\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "from typing import Optional\n", + "from lavague.core import Context, get_default_context\n", + "from llama_index.core.base.llms.base import BaseLLM\n", + "import copy\n", + "\n", + "PYTHON_ENGINE_PROMPT_TEMPLATE = PromptTemplate(\"\"\"\n", + "You are an AI system specialized in Python code generation to answer user queries.\n", + "The inputs are: an instruction, and the current state of the local variables available to the environment where your code will be executed.\n", + "Your output is the code that will perform the action described in the instruction, using the variables available in the environment.\n", + "You can import libraries and use any variables available in the environment.\n", + "Detail thoroughly the steps to perform the action in the code you generate with comments.\n", + "The last line of your code should be an assignment to the variable 'output' containing the result of the action.\n", + "\n", + "Here are previous examples:\n", + "{examples}\n", + "\n", + "Instruction: {instruction}\n", + "State:\n", + "{state_description}\n", + "Code:\n", + "\"\"\")\n", + "\n", + "class PythonEngine:\n", + " llm: BaseLLM \n", + " prompt_template: PromptTemplate\n", + "\n", + " def __init__(self, examples: str, context: Optional[Context] = None):\n", + " if context is None:\n", + " context = get_default_context()\n", + " self.llm = context.llm\n", + " self.extractor = context.extractor\n", + " self.prompt_template = PYTHON_ENGINE_PROMPT_TEMPLATE.partial_format(\n", + " examples=examples\n", + " )\n", + " \n", + " def generate_code(self, instruction: str, state: dict) -> str:\n", + " state_description = self.get_state_description(state)\n", + " prompt = self.prompt_template.format(instruction=instruction, state_description=state_description)\n", + " response = self.llm.complete(prompt).text\n", + " code = self.extractor.extract(response)\n", + " return code\n", + " \n", + " def execute_code(self, code: str, state: dict):\n", + " local_scope = copy.deepcopy(state)\n", + " exec(code, local_scope, local_scope)\n", + " output = local_scope[\"output\"]\n", + " return output\n", + " \n", + " def get_state_description(self, state: dict) -> str:\n", + " \"\"\"TO DO: provide more complex state descriptions\"\"\"\n", + " state_description = \"\"\"\n", + " html ('str'): The content of the HTML page being analyzed\"\"\"\n", + " return state_description\n", + " \n", + "with open(\"python_engine_examples.txt\") as f:\n", + " python_engine_examples = f.read()\n", + " \n", + "python_engine = PythonEngine(python_engine_examples, context)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "REWRITER_PROMPT_TEMPLATE = PromptTemplate(\"\"\"\n", + "You are an AI expert.\n", + "You are given a high level instruction on a generic action to perform.\n", 
+ "Your output is an instruction of the action, rewritten to be more specific on the capabilities at your disposal to perform the action.\n", + "Here are your capabilities:\n", + "{capabilities}\n", + "\n", + "Here are previous examples:\n", + "{examples}\n", + "\n", + "Here is the next instruction to rewrite:\n", + "Original instruction: {original_instruction}\n", + "\"\"\")\n", + "\n", + "DEFAULT_CAPABILITIES = \"\"\"\n", + "- Answer questions using the content of an HTML page using llama index and trafilatura\n", + "\"\"\"\n", + "\n", + "DEFAULT_EXAMPLES = \"\"\"\n", + "Original instruction: Use the content of the HTML page to answer the question 'How was falcon-11B trained?'\n", + "Capability: Answer questions using the content of an HTML page using llama index and trafilatura\n", + "Rewritten instruction: Extract the content of the HTML page and use llama index to answer the question 'How was falcon-11B trained?'\n", + "\"\"\"\n", + "\n", + "class Rewriter:\n", + " def __init__(self, capabilities: str = DEFAULT_CAPABILITIES, examples: str = DEFAULT_EXAMPLES, context: Optional[Context] = None):\n", + " if context is None:\n", + " context = get_default_context()\n", + " self.llm = context.llm\n", + " self.prompt_template = REWRITER_PROMPT_TEMPLATE.partial_format(\n", + " capabilities=capabilities,\n", + " examples=examples\n", + " ) \n", + " def rewrite_instruction(self, original_instruction: str) -> str:\n", + " prompt = self.prompt_template.format(original_instruction=original_instruction)\n", + " rewritten_instruction = self.llm.complete(prompt).text\n", + " return rewritten_instruction\n", + " \n", + "rewriter = Rewriter(capabilities=DEFAULT_CAPABILITIES, examples=DEFAULT_EXAMPLES, context=context)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "# Flemme de finir\n", + "\n", + "# Thoughts:\n", + "# - The current page is the Azure Pricing Calculator.\n", + "# - To calculate the cost of an AKS (Azure Kubernetes Service) cluster, we need to add the AKS service to the calculator.\n", + "# - The current screen does not show the options to add services.\n", + "# - The best approach is to scroll down to find the section where services can be added to the calculator.\n", + "# - Because this is a simple scrolling action, the best next step is to use the Navigation Controls engine to scroll down.\n", + "# Next engine: Navigation Controls\n", + "# Instruction: Scroll down by one full screen to continue exploring the current page.\n", + "# -----\n", + "# Objective: Find the definition of 'Diffusers'\n", + "# Previous instructions: \n", + "# - Click on 'Diffusers' link\n", + "# Last engine: Navigation Engine\n", + "# Current state: [SCREENSHOT]\n", + "\n", + "# Thought:\n", + "# - The current page is the documentation page of Hugging Face.\n", + "# - Hugging Face is a platform for AI models and datasets, where users can explore and interact with latest AI resources.\n", + "# - The definition of 'Diffusers' is provided in the documentation.\n", + "# - No further action is needed to achieve the objective.\n", + "# Next engine: STOP\n", + "# Instruction: STOP -->" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "from selenium import webdriver\n", + "from selenium.webdriver.common.by import By\n", + "from selenium.webdriver.chrome.options import Options\n", + "from selenium.webdriver.common.keys import Keys\n", + "from selenium.webdriver.common.action_chains import 
ActionChains\n", + "\n", + "chrome_options = Options()\n", + "chrome_options.add_argument(\"--no-sandbox\")\n", + "driver = webdriver.Chrome(options=chrome_options)\n", + "\n", + "width = 1024\n", + "height = 1024\n", + "\n", + "driver.set_window_size(width, height)\n", + "viewport_height = driver.execute_script(\"return window.innerHeight;\")\n", + "\n", + "height_difference = height - viewport_height\n", + "driver.set_window_size(width, height + height_difference)\n", + "\n", + "driver.get(\"https://huggingface.co/meta-llama/Meta-Llama-3-8B\")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "from llama_index.core import Document, VectorStoreIndex\n", + "from llama_index.core import Settings\n", + "import trafilatura\n", + "\n", + "class PageSummarizer:\n", + " def __init__(self, llm, embed_model):\n", + " self.llm = llm\n", + " self.embed_model = embed_model\n", + " \n", + " def summarize(self, html: str) -> str:\n", + " Settings.llm = self.llm\n", + " Settings.embed_model = self.embed_model\n", + " \n", + " page_content = trafilatura.extract(html)\n", + " \n", + " documents = [Document(text=page_content)]\n", + " index = VectorStoreIndex.from_documents(documents)\n", + " query_engine = index.as_query_engine()\n", + " instruction = \"Provide a detailled summary of this text\"\n", + " page_content_summary = query_engine.query(instruction).response\n", + " return page_content_summary\n", + "\n", + "from llama_index.llms.groq import Groq\n", + "\n", + "model = \"llama3-8b-8192\"\n", + "llm = Groq(model=model, temperature=0.1)\n", + "embed_model = context.embedding\n", + "\n", + "page_summarizer = PageSummarizer(llm, embed_model)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "import re\n", + "\n", + "def extract_instruction(text):\n", + " # Use a regular expression to find the content after \"Instruction:\"\n", + " match = re.search(r\"Instruction:\\s*(.*)\", text)\n", + " if match:\n", + " return match.group(\n", + " 1\n", + " ).strip() # Return the matched group, stripping any excess whitespace\n", + " else:\n", + " raise ValueError(\"No instruction found in the text.\")\n", + "\n", + "def extract_next_engine(text):\n", + " # Use a regular expression to find the content after \"Next engine:\"\n", + " match = re.search(r\"Next engine:\\s*(.*)\", text)\n", + " if match:\n", + " return match.group(\n", + " 1\n", + " ).strip() # Return the matched group, stripping any excess whitespace\n", + " else:\n", + " raise ValueError(\"No next engine found in the text.\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "from lavague.core import ActionEngine\n", + "from lavague.drivers.selenium import SeleniumDriver\n", + "\n", + "selenium_driver = SeleniumDriver()\n", + "selenium_driver.driver = driver\n", + "navigation_engine = ActionEngine(selenium_driver)\n", + "action_engine = navigation_engine" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "- The current screenshot shows the documentation page of Hugging Face.\n", + "- The page content summary provides an overview of various products, services, and tools offered by Hugging Face.\n", + "- The objective is to provide the code to install `transformers.js`.\n", + "- The relevant section for `transformers.js` is likely under the \"Transformers.js\" 
link.\n", + "- The next step is to navigate to the \"Transformers.js\" section to find the installation code.\n", + "\n", + "Next engine: Navigation Engine\n", + "Instruction: Click on the \"Transformers.js\" link to navigate to its documentation page.\n" + ] + }, + { + "ename": "KeyboardInterrupt", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipykernel_2568/928459559.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0mlocal_scope\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m\"driver\"\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mdriver\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0mexec\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcode\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlocal_scope\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlocal_scope\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/site-packages/selenium/webdriver/remote/webelement.py\u001b[0m in \u001b[0;36mclick\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mclick\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 93\u001b[0m \u001b[0;34m\"\"\"Clicks the element.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 94\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_execute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mCommand\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mCLICK_ELEMENT\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 95\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 96\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0msubmit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/site-packages/selenium/webdriver/remote/webelement.py\u001b[0m in \u001b[0;36m_execute\u001b[0;34m(self, command, params)\u001b[0m\n\u001b[1;32m 393\u001b[0m \u001b[0mparams\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 394\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"id\"\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_id\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 395\u001b[0;31m \u001b[0;32mreturn\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_parent\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcommand\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 396\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 397\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mfind_element\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mby\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mBy\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mID\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mWebElement\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/site-packages/selenium/webdriver/remote/webdriver.py\u001b[0m in \u001b[0;36mexecute\u001b[0;34m(self, driver_command, params)\u001b[0m\n\u001b[1;32m 343\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"sessionId\"\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msession_id\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 344\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 345\u001b[0;31m \u001b[0mresponse\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcommand_executor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdriver_command\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 346\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 347\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0merror_handler\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcheck_response\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresponse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/site-packages/selenium/webdriver/remote/remote_connection.py\u001b[0m in \u001b[0;36mexecute\u001b[0;34m(self, command, params)\u001b[0m\n\u001b[1;32m 300\u001b[0m \u001b[0mtrimmed\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_trim_large_entries\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 301\u001b[0m \u001b[0mLOGGER\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdebug\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"%s %s %s\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcommand_info\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrimmed\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 302\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_request\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcommand_info\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mbody\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 303\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 304\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_request\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbody\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/site-packages/selenium/webdriver/remote/remote_connection.py\u001b[0m in \u001b[0;36m_request\u001b[0;34m(self, method, url, body)\u001b[0m\n\u001b[1;32m 320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 321\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeep_alive\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 322\u001b[0;31m \u001b[0mresponse\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_conn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmethod\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbody\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mbody\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheaders\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mheaders\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 323\u001b[0m \u001b[0mstatuscode\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstatus\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 324\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/site-packages/urllib3/_request_methods.py\u001b[0m in \u001b[0;36mrequest\u001b[0;34m(self, method, url, body, fields, headers, json, **urlopen_kw)\u001b[0m\n\u001b[1;32m 142\u001b[0m )\n\u001b[1;32m 143\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 144\u001b[0;31m return self.request_encode_body(\n\u001b[0m\u001b[1;32m 145\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfields\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheaders\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mheaders\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0murlopen_kw\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 146\u001b[0m )\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/site-packages/urllib3/_request_methods.py\u001b[0m in \u001b[0;36mrequest_encode_body\u001b[0;34m(self, method, url, fields, headers, encode_multipart, multipart_boundary, **urlopen_kw)\u001b[0m\n\u001b[1;32m 277\u001b[0m \u001b[0mextra_kw\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0murlopen_kw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 278\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 279\u001b[0;31m \u001b[0;32mreturn\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0murlopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmethod\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mextra_kw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/site-packages/urllib3/poolmanager.py\u001b[0m in \u001b[0;36murlopen\u001b[0;34m(self, method, url, redirect, **kw)\u001b[0m\n\u001b[1;32m 442\u001b[0m \u001b[0mresponse\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0murlopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmethod\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 443\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 444\u001b[0;31m \u001b[0mresponse\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0murlopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmethod\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mu\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrequest_uri\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 445\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 446\u001b[0m \u001b[0mredirect_location\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mredirect\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mresponse\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_redirect_location\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/site-packages/urllib3/connectionpool.py\u001b[0m in \u001b[0;36murlopen\u001b[0;34m(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)\u001b[0m\n\u001b[1;32m 791\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 792\u001b[0m \u001b[0;31m# Make the request on the HTTPConnection object\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 793\u001b[0;31m response = self._make_request(\n\u001b[0m\u001b[1;32m 794\u001b[0m \u001b[0mconn\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 795\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/site-packages/urllib3/connectionpool.py\u001b[0m in \u001b[0;36m_make_request\u001b[0;34m(self, conn, method, url, body, headers, retries, timeout, chunked, response_conn, preload_content, decode_content, enforce_content_length)\u001b[0m\n\u001b[1;32m 535\u001b[0m \u001b[0;31m# Receive the response from the server\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 536\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 537\u001b[0;31m \u001b[0mresponse\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetresponse\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 538\u001b[0m \u001b[0;32mexcept\u001b[0m 
\u001b[0;34m(\u001b[0m\u001b[0mBaseSSLError\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mOSError\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 539\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_raise_timeout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0merr\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0murl\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0murl\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtimeout_value\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mread_timeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/site-packages/urllib3/connection.py\u001b[0m in \u001b[0;36mgetresponse\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 464\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 465\u001b[0m \u001b[0;31m# Get the response from http.client.HTTPConnection\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 466\u001b[0;31m \u001b[0mhttplib_response\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgetresponse\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 467\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 468\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/http/client.py\u001b[0m in \u001b[0;36mgetresponse\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1373\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1374\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1375\u001b[0;31m \u001b[0mresponse\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbegin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1376\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mConnectionError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1377\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/http/client.py\u001b[0m in \u001b[0;36mbegin\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 316\u001b[0m \u001b[0;31m# read until we get a non-100 response\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 317\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 318\u001b[0;31m \u001b[0mversion\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstatus\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreason\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_read_status\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 319\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mstatus\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mCONTINUE\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 320\u001b[0m 
\u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/http/client.py\u001b[0m in \u001b[0;36m_read_status\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 277\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 278\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_read_status\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 279\u001b[0;31m \u001b[0mline\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreadline\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_MAXLINE\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"iso-8859-1\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 280\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mline\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0m_MAXLINE\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mLineTooLong\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"status line\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/miniconda3/envs/myenv/lib/python3.10/socket.py\u001b[0m in \u001b[0;36mreadinto\u001b[0;34m(self, b)\u001b[0m\n\u001b[1;32m 703\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 704\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 705\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_sock\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecv_into\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mb\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 706\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mtimeout\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 707\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_timeout_occurred\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ], + "source": [ + "from llama_index.core import SimpleDirectoryReader\n", + "\n", + "objective = \"Provide the code to \"\n", + "driver.get(\"https://huggingface.co/docs\")\n", + "\n", + "previous_instructions = \"[NONE]\"\n", + "last_engine = \"[NONE]\"\n", + "\n", + "N_ATTEMPTS = 5\n", + "N_STEPS = 5\n", + "\n", + "driver.save_screenshot(\"screenshots/output.png\")\n", + "image_documents = SimpleDirectoryReader(\"./screenshots\").load_data()\n", + "\n", + "html = driver.page_source\n", + "page_content_summary = page_summarizer.summarize(html)\n", + "\n", + "current_state = f\"\"\"\n", + "- screenshot: [SCREENSHOT]\n", + "- page_content_summary: {page_content_summary}\n", + "\"\"\"\n", + "\n", + "for i in range(N_STEPS):\n", + " prompt = prompt_template.format(\n", + " objective=objective, previous_instructions=previous_instructions, \n", + " last_engine=last_engine, current_state=current_state)\n", + "\n", + " output = mm_llm.complete(prompt, 
image_documents=image_documents).text\n", + "\n", + " print(output)\n", + "\n", + " next_engine = extract_next_engine(output)\n", + " instruction = extract_instruction(output)\n", + " \n", + " if next_engine == \"Navigation Engine\":\n", + " \n", + " query = instruction\n", + " nodes = action_engine.get_nodes(query)\n", + " llm_context = \"\\n\".join(nodes)\n", + "\n", + " for _ in range(N_ATTEMPTS):\n", + " try:\n", + " action = action_engine.get_action_from_context(llm_context, query)\n", + " code = f\"\"\"\n", + "from selenium.webdriver.common.by import By\n", + "from selenium.webdriver.common.keys import Keys\n", + " {action}\"\"\".strip()\n", + "\n", + " local_scope = {\"driver\": driver}\n", + " exec(code, local_scope, local_scope)\n", + " break\n", + " except Exception as e:\n", + " print(f\"Action execution failed. Retrying...\")\n", + " pass\n", + " \n", + " driver.save_screenshot(\"screenshots/output.png\")\n", + " image_documents = SimpleDirectoryReader(\"./screenshots\").load_data()\n", + "\n", + " html = driver.page_source\n", + " page_content_summary = page_summarizer.summarize(html)\n", + "\n", + " current_state = f\"\"\"\n", + "- screenshot: [SCREENSHOT]\n", + "- page_content_summary: {page_content_summary}\n", + " \"\"\"\n", + " \n", + " elif next_engine == \"Python Engine\":\n", + " rewritten_instruction = rewriter.rewrite_instruction(instruction)\n", + " state = {\n", + " \"html\": driver.page_source\n", + " }\n", + "\n", + " code = python_engine.generate_code(rewritten_instruction, state)\n", + " output = python_engine.execute_code(code, state)\n", + " print(output)\n", + " \n", + " current_state = f\"\"\"\n", + "- output: {output}\n", + "\"\"\"\n", + " \n", + " # elif next_engine == \"IO Engine\":\n", + " # print(\"IO Engine\")\n", + " # break\n", + " \n", + " \n", + " if previous_instructions == \"[NONE]\":\n", + " previous_instructions = f\"\"\"\n", + "- {instruction}\"\"\"\n", + " else:\n", + " previous_instructions += f\"\"\"\n", + "- {instruction}\"\"\"\n", + " \n", + " last_engine = next_engine\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "from peft import LoraConfig, TaskType\n", + "peft_config = LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1)\n" + ] + } + ], + "source": [ + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "myenv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python_engine_examples.txt b/python_engine_examples.txt new file mode 100644 index 00000000..c4fae4b7 --- /dev/null +++ b/python_engine_examples.txt @@ -0,0 +1,28 @@ +Capability: Answer questions using the content of an HTML page +Instruction: Extract the content of the HTML page and do a call to OpenAI GPT-3.5 turbo to answer the question 'How was falcon-11B trained?' 
+State: + html ('str'): The content of the HTML page being analyzed +Code: +# Let's think step by step +# We first need to extract the text content of the HTML page. +# Then we will use an LLM to answer the question. Because the page content might not fit in the LLM context window, we will use Llama Index to perform RAG on the extracted text content. + +# First, we use the trafilatura library to extract the text content of the HTML page +import trafilatura + +page_content = trafilatura.extract(html) + +# Next, we will use Llama Index to perform RAG on the extracted text content +from llama_index.core import Document, VectorStoreIndex + +documents = [Document(text=page_content)] + +# We then build the index +index = VectorStoreIndex.from_documents(documents) +query_engine = index.as_query_engine() + +# We will use the query engine to answer the question +instruction = "How was falcon-11B trained?" + +# We finally store the answer text in the variable 'output' +output = query_engine.query(instruction).response
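+ +----- +Capability: Answer questions using the content of an HTML page +Instruction: Extract the release date of 'Meta-Llama-3-8B' from the textual content of the page +State: + html ('str'): The content of the HTML page being analyzed +Code: +# (Illustrative extra example: a minimal sketch mirroring the trafilatura + Llama Index approach above, assuming the same 'html' variable is available; the instruction is taken from the world model examples in the notebook.) +# Let's think step by step +# We first extract the text content of the HTML page, then query it with Llama Index to find the release date. +import trafilatura +from llama_index.core import Document, VectorStoreIndex + +page_content = trafilatura.extract(html) + +# Build an index over the page content so we can query it +documents = [Document(text=page_content)] +index = VectorStoreIndex.from_documents(documents) +query_engine = index.as_query_engine() + +# Ask for the release date and store the answer text in the variable 'output' +instruction = "What is the release date of 'Meta-Llama-3-8B'?" +output = query_engine.query(instruction).response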