From 6bb598820ec670db0c333b4950362a8844c3c0ab Mon Sep 17 00:00:00 2001
From: fabianegli
Date: Fri, 18 Oct 2024 23:24:13 +0200
Subject: [PATCH 1/5] ignore venv

---
 .gitignore | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 496ee2c..edef0f5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,3 @@
-.DS_Store
\ No newline at end of file
+.DS_Store
+.venv
+venv

From 0c05461f84ca0d8ad9fa525821c10dbd0937db92 Mon Sep 17 00:00:00 2001
From: fabianegli
Date: Sat, 19 Oct 2024 13:06:34 +0200
Subject: [PATCH 2/5] simplify repo and added some tests

---
 .github/workflows/daily_update.yml      |   9 +-
 .github/workflows/main.yml              |  36 ---
 .gitignore                              |   1 +
 code/action.php                         |  33 ---
 code/dark_visitors.py                   | 169 ++++++++------
 code/test_files/robots.json             | 282 ++++++++++++++++++++++++
 code/test_files/robots.txt              |  41 ++++
 code/test_files/table-of-bot-metrics.md |  42 ++++
 code/tests.py                           |  21 ++
 robots.txt                              |   2 +-
 table-of-bot-metrics.md                 |  80 +++----
 11 files changed, 531 insertions(+), 185 deletions(-)
 delete mode 100644 .github/workflows/main.yml
 delete mode 100644 code/action.php
 create mode 100644 code/test_files/robots.json
 create mode 100644 code/test_files/robots.txt
 create mode 100644 code/test_files/table-of-bot-metrics.md
 create mode 100644 code/tests.py

diff --git a/.github/workflows/daily_update.yml b/.github/workflows/daily_update.yml
index 6b6624a..11eeab3 100644
--- a/.github/workflows/daily_update.yml
+++ b/.github/workflows/daily_update.yml
@@ -1,5 +1,8 @@
 name: Daily Update from Dark Visitors
 on:
+  push:
+    branches:
+      - "main"
   schedule:
     - cron: "0 0 * * *"

@@ -22,9 +25,3 @@ jobs:
       git add -A
       git diff --quiet && git diff --staged --quiet || (git commit -m "Daily update from Dark Visitors" && git push)
     shell: bash
-  call-main:
-    needs: dark-visitors
-    uses: ./.github/workflows/main.yml
-    secrets: inherit
-    with:
-      message: "Daily update from Dark Visitors"
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
deleted file mode 100644
index ea8edc5..0000000
--- a/.github/workflows/main.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-on:
-  workflow_call:
-    inputs:
-      message:
-        type: string
-        required: true
-        description: The message to commit
-  push:
-    paths:
-      - 'robots.json'
-
-jobs:
-  ai-robots-txt:
-    runs-on: ubuntu-latest
-    name: ai-robots-txt
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 2
-      - run: |
-          git config --global user.name "ai.robots.txt"
-          git config --global user.email "ai.robots.txt@users.noreply.github.com"
-          git log -1
-          git status
-          echo "Running update script ..."
-          php -f code/action.php
-          echo "... done."
-          git --no-pager diff
-          git add -A
-          if [ -n "${{ inputs.message }}" ]; then
-            git commit -m "${{ inputs.message }}"
-          else
-            git commit -m "${{ github.event.head_commit.message }}"
-          fi
-          git push
-        shell: bash
diff --git a/.gitignore b/.gitignore
index edef0f5..cbe1c29 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
 .DS_Store
 .venv
 venv
+__pycache__
diff --git a/code/action.php b/code/action.php
deleted file mode 100644
index f6a1d3d..0000000
--- a/code/action.php
+++ /dev/null
@@ -1,33 +0,0 @@
- $details) {
-    $robots_txt .= 'User-agent: '.$robot."\n";
-    $robots_table .= '| '.$robot.' | '.$details['operator'].' | '.$details['respect'].' | '.$details['function'].' | '.$details['frequency'].' | '.$details['description'].' | '."\n";
-}
-
-$robots_txt .= 'Disallow: /';
-var_dump($robots_txt);
-var_dump($robots_table);
-file_put_contents('robots.txt', $robots_txt);
-file_put_contents('table-of-bot-metrics.md', $robots_table);
diff --git a/code/dark_visitors.py b/code/dark_visitors.py
index 5de65fe..838ce67 100644
--- a/code/dark_visitors.py
+++ b/code/dark_visitors.py
@@ -4,72 +4,103 @@
 import requests
 from bs4 import BeautifulSoup

-session = requests.Session()
-response = session.get("https://darkvisitors.com/agents")
-soup = BeautifulSoup(response.text, "html.parser")
-
-existing_content = json.loads(Path("./robots.json").read_text())
-to_include = [
-    "AI Assistants",
-    "AI Data Scrapers",
-    "AI Search Crawlers",
-    # "Archivers",
-    # "Developer Helpers",
-    # "Fetchers",
-    # "Intelligence Gatherers",
-    # "Scrapers",
-    # "Search Engine Crawlers",
-    # "SEO Crawlers",
-    # "Uncategorized",
-    "Undocumented AI Agents"
-]
-
-for section in soup.find_all("div", {"class": "agent-links-section"}):
-    category = section.find("h2").get_text()
-    if category not in to_include:
-        continue
-    for agent in section.find_all("a", href=True):
-        name = agent.find("div", {"class": "agent-name"}).get_text().strip()
-        desc = agent.find("p").get_text().strip()
-
-        default_values = {
-            "Unclear at this time.",
-            "No information provided.",
-            "No information.",
-            "No explicit frequency provided."
-        }
-        default_value = "Unclear at this time."
-
-        # Parse the operator information from the description if possible
-        operator = default_value
-        if "operated by " in desc:
-            try:
-                operator = desc.split("operated by ", 1)[1].split(".", 1)[0].strip()
-            except Exception as e:
-                print(f"Error: {e}")
-
-        def consolidate(field: str, value: str) -> str:
-            # New entry
-            if name not in existing_content:
-                return value
-            # New field
-            if field not in existing_content[name]:
-                return value
-            # Unclear value
-            if existing_content[name][field] in default_values and value not in default_values:
-                return value
-            # Existing value
-            return existing_content[name][field]
-
-        existing_content[name] = {
-            "operator": consolidate("operator", operator),
-            "respect": consolidate("respect", default_value),
-            "function": consolidate("function", f"{category}"),
-            "frequency": consolidate("frequency", default_value),
-            "description": consolidate("description", f"{desc} More info can be found at https://darkvisitors.com/agents{agent['href']}")
-        }
-
-print(f"Total: {len(existing_content)}")
-sorted_keys = sorted(existing_content, key=lambda k: k.lower())
-existing_content = {k: existing_content[k] for k in sorted_keys}
-Path("./robots.json").write_text(json.dumps(existing_content, indent=4))
\ No newline at end of file
+
+def get_updated_robots_json():
+    session = requests.Session()
+    response = session.get("https://darkvisitors.com/agents")
+    soup = BeautifulSoup(response.text, "html.parser")
+
+    existing_content = json.loads(Path("./robots.json").read_text())
+    to_include = [
+        "AI Assistants",
+        "AI Data Scrapers",
+        "AI Search Crawlers",
+        # "Archivers",
+        # "Developer Helpers",
+        # "Fetchers",
+        # "Intelligence Gatherers",
+        # "Scrapers",
+        # "Search Engine Crawlers",
+        # "SEO Crawlers",
+        # "Uncategorized",
+        "Undocumented AI Agents",
+    ]
+
+    for section in soup.find_all("div", {"class": "agent-links-section"}):
+        category = section.find("h2").get_text()
+        if category not in to_include:
+            continue
+        for agent in section.find_all("a", href=True):
+            name = agent.find("div", {"class": "agent-name"}).get_text().strip()
+            desc = agent.find("p").get_text().strip()
+
+            default_values = {
+                "Unclear at this time.",
+                "No information provided.",
+                "No information.",
+                "No explicit frequency provided.",
+            }
+            default_value = "Unclear at this time."
+
+            # Parse the operator information from the description if possible
+            operator = default_value
+            if "operated by " in desc:
+                try:
+                    operator = desc.split("operated by ", 1)[1].split(".", 1)[0].strip()
+                except Exception as e:
+                    print(f"Error: {e}")
+
+            def consolidate(field: str, value: str) -> str:
+                # New entry
+                if name not in existing_content:
+                    return value
+                # New field
+                if field not in existing_content[name]:
+                    return value
+                # Unclear value
+                if (
+                    existing_content[name][field] in default_values
+                    and value not in default_values
+                ):
+                    return value
+                # Existing value
+                return existing_content[name][field]
+
+            existing_content[name] = {
+                "operator": consolidate("operator", operator),
+                "respect": consolidate("respect", default_value),
+                "function": consolidate("function", f"{category}"),
+                "frequency": consolidate("frequency", default_value),
+                "description": consolidate(
+                    "description",
+                    f"{desc} More info can be found at https://darkvisitors.com/agents{agent['href']}",
+                ),
+            }
+
+    print(f"Total: {len(existing_content)}")
+    sorted_keys = sorted(existing_content, key=lambda k: k.lower())
+    sorted_robots = {k: existing_content[k] for k in sorted_keys}
+    return sorted_robots
+
+
+def json_to_txt(robots_json):
+    robots_txt = "\n".join(f"User-agent: {k}" for k in robots_json.keys())
+    robots_txt += "\nDisallow: /\n"
+    return robots_txt
+
+
+def json_to_table(robots_json):
+    table = "| Name | Operator | Respects `robots.txt` | Data use | Visit regularity | Description |\n"
+    table += "|-----|----------|-----------------------|----------|------------------|-------------|\n"
+
+    for name, robot in robots_json.items():
+        table += f'| {name} | {robot["operator"]} | {robot["respect"]} | {robot["function"]} | {robot["frequency"]} | {robot["description"]} |\n'
+
+    return table
+
+
+if __name__ == "__main__":
+    robots_json = get_updated_robots_json()
+    Path("./robots.json").write_text(json.dumps(robots_json, indent=4))
+    Path("./robots.txt").write_text(json_to_txt(robots_json))
+    Path("./table-of-bot-metrics.md").write_text(json_to_table(robots_json))
diff --git a/code/test_files/robots.json b/code/test_files/robots.json
new file mode 100644
index 0000000..c50d63c
--- /dev/null
+++ b/code/test_files/robots.json
@@ -0,0 +1,282 @@
+{
+    "AI2Bot": {
+        "description": "Explores 'certain domains' to find web content.",
+        "frequency": "No information provided.",
+        "function": "Content is used to train open language models.",
+        "operator": "[Ai2](https://allenai.org/crawler)",
+        "respect": "Yes"
+    },
+    "Ai2Bot-Dolma": {
+        "description": "Explores 'certain domains' to find web content.",
+        "frequency": "No information provided.",
+        "function": "Content is used to train open language models.",
+        "operator": "[Ai2](https://allenai.org/crawler)",
+        "respect": "Yes"
+    },
+    "Amazonbot": {
+        "operator": "Amazon",
+        "respect": "Yes",
+        "function": "Service improvement and enabling answers for Alexa users.",
+        "frequency": "No information provided.",
+        "description": "Includes references to crawled website when surfacing answers via Alexa; does not clearly outline other uses."
+    },
+    "anthropic-ai": {
+        "operator": "[Anthropic](https://www.anthropic.com)",
+        "respect": "Unclear at this time.",
+        "function": "Scrapes data to train Anthropic's AI products.",
+        "frequency": "No information provided.",
+        "description": "Scrapes data to train LLMs and AI products offered by Anthropic."
+    },
+    "Applebot": {
+        "operator": "Unclear at this time.",
+        "respect": "Unclear at this time.",
+        "function": "AI Search Crawlers",
+        "frequency": "Unclear at this time.",
+        "description": "Applebot is a web crawler used by Apple to index search results that allow the Siri AI Assistant to answer user questions. Siri's answers normally contain references to the website. More info can be found at https://darkvisitors.com/agents/agents/applebot"
+    },
+    "Applebot-Extended": {
+        "operator": "[Apple](https://support.apple.com/en-us/119829#datausage)",
+        "respect": "Yes",
+        "function": "Powers features in Siri, Spotlight, Safari, Apple Intelligence, and others.",
+        "frequency": "Unclear at this time.",
+        "description": "Apple has a secondary user agent, Applebot-Extended ... [that is] used to train Apple's foundation models powering generative AI features across Apple products, including Apple Intelligence, Services, and Developer Tools."
+    },
+    "Bytespider": {
+        "operator": "ByteDance",
+        "respect": "No",
+        "function": "LLM training.",
+        "frequency": "Unclear at this time.",
+        "description": "Downloads data to train LLMs, including ChatGPT competitors."
+    },
+    "CCBot": {
+        "operator": "[Common Crawl Foundation](https://commoncrawl.org)",
+        "respect": "[Yes](https://commoncrawl.org/ccbot)",
+        "function": "Provides open crawl dataset, used for many purposes, including Machine Learning/AI.",
+        "frequency": "Monthly at present.",
+        "description": "Web archive going back to 2008. [Cited in thousands of research papers per year](https://commoncrawl.org/research-papers)."
+    },
+    "ChatGPT-User": {
+        "operator": "[OpenAI](https://openai.com)",
+        "respect": "Yes",
+        "function": "Takes action based on user prompts.",
+        "frequency": "Only when prompted by a user.",
+        "description": "Used by plugins in ChatGPT to answer queries based on user input."
+    },
+    "Claude-Web": {
+        "operator": "[Anthropic](https://www.anthropic.com)",
+        "respect": "Unclear at this time.",
+        "function": "Scrapes data to train Anthropic's AI products.",
+        "frequency": "No information provided.",
+        "description": "Scrapes data to train LLMs and AI products offered by Anthropic."
+    },
+    "ClaudeBot": {
+        "operator": "[Anthropic](https://www.anthropic.com)",
+        "respect": "[Yes](https://support.anthropic.com/en/articles/8896518-does-anthropic-crawl-data-from-the-web-and-how-can-site-owners-block-the-crawler)",
+        "function": "Scrapes data to train Anthropic's AI products.",
+        "frequency": "No information provided.",
+        "description": "Scrapes data to train LLMs and AI products offered by Anthropic."
+    },
+    "cohere-ai": {
+        "operator": "[Cohere](https://cohere.com)",
+        "respect": "Unclear at this time.",
+        "function": "Retrieves data to provide responses to user-initiated prompts.",
+        "frequency": "Takes action based on user prompts.",
+        "description": "Retrieves data based on user prompts."
+    },
+    "Diffbot": {
+        "operator": "[Diffbot](https://www.diffbot.com/)",
+        "respect": "At the discretion of Diffbot users.",
+        "function": "Aggregates structured web data for monitoring and AI model training.",
+        "frequency": "Unclear at this time.",
+        "description": "Diffbot is an application used to parse web pages into structured data; this data is used for monitoring or AI model training."
+    },
+    "FacebookBot": {
+        "operator": "Meta/Facebook",
+        "respect": "[Yes](https://developers.facebook.com/docs/sharing/bot/)",
+        "function": "Training language models",
+        "frequency": "Up to 1 page per second",
+        "description": "Officially used for training Meta \"speech recognition technology,\" unknown if used to train Meta AI specifically."
+    },
+    "facebookexternalhit": {
+        "description": "Unclear at this time.",
+        "frequency": "Unclear at this time.",
+        "function": "No information.",
+        "operator": "Meta/Facebook",
+        "respect": "[Yes](https://developers.facebook.com/docs/sharing/bot/)"
+    },
+    "FriendlyCrawler": {
+        "description": "Unclear who the operator is; but data is used for training/machine learning.",
+        "frequency": "Unclear at this time.",
+        "function": "We are using the data from the crawler to build datasets for machine learning experiments.",
+        "operator": "Unknown",
+        "respect": "[Yes](https://imho.alex-kunz.com/2024/01/25/an-update-on-friendly-crawler)"
+    },
+    "Google-Extended": {
+        "operator": "Google",
+        "respect": "[Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers)",
+        "function": "LLM training.",
+        "frequency": "No information.",
+        "description": "Used to train Gemini and Vertex AI generative APIs. Does not impact a site's inclusion or ranking in Google Search."
+    },
+    "GoogleOther": {
+        "description": "\"Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development.\"",
+        "frequency": "No information.",
+        "function": "Scrapes data.",
+        "operator": "Google",
+        "respect": "[Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers)"
+    },
+    "GoogleOther-Image": {
+        "description": "\"Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development.\"",
+        "frequency": "No information.",
+        "function": "Scrapes data.",
+        "operator": "Google",
+        "respect": "[Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers)"
+    },
+    "GoogleOther-Video": {
+        "description": "\"Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development.\"",
+        "frequency": "No information.",
+        "function": "Scrapes data.",
+        "operator": "Google",
+        "respect": "[Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers)"
+    },
+    "GPTBot": {
+        "operator": "[OpenAI](https://openai.com)",
+        "respect": "Yes",
+        "function": "Scrapes data to train OpenAI's products.",
+        "frequency": "No information.",
+        "description": "Data is used to train current and future models, removed paywalled data, PII and data that violates the company's policies."
+    },
+    "iaskspider/2.0": {
+        "description": "Used to provide answers to user queries.",
+        "frequency": "Unclear at this time.",
+        "function": "Crawls sites to provide answers to user queries.",
+        "operator": "iAsk",
+        "respect": "No"
+    },
+    "ICC-Crawler": {
+        "description": "Use the collected data for artificial intelligence technologies; provide data to third parties, including commercial companies; those companies can use the data for their own business.",
+        "frequency": "No information.",
+        "function": "Scrapes data to train and support AI technologies.",
+        "operator": "[NICT](https://nict.go.jp)",
+        "respect": "Yes"
+    },
+    "ImagesiftBot": {
+        "description": "Once images and text are downloaded from a webpage, ImageSift analyzes this data from the page and stores the information in an index. Our web intelligence products use this index to enable search and retrieval of similar images.",
+        "frequency": "No information.",
+        "function": "ImageSiftBot is a web crawler that scrapes the internet for publicly available images to support our suite of web intelligence products",
+        "operator": "[ImageSift](https://imagesift.com)",
+        "respect": "[Yes](https://imagesift.com/about)"
+    },
+    "img2dataset": {
+        "description": "Downloads large sets of images into datasets for LLM training or other purposes.",
+        "frequency": "At the discretion of img2dataset users.",
+        "function": "Scrapes images for use in LLMs.",
+        "operator": "[img2dataset](https://github.com/rom1504/img2dataset)",
+        "respect": "Unclear at this time."
+    },
+    "ISSCyberRiskCrawler": {
+        "description": "Used to train machine learning based models to quantify cyber risk.",
+        "frequency": "No information.",
+        "function": "Scrapes data to train machine learning models.",
+        "operator": "[ISS-Corporate](https://iss-cyber.com)",
+        "respect": "No"
+    },
+    "Kangaroo Bot": {
+        "operator": "Unclear at this time.",
+        "respect": "Unclear at this time.",
+        "function": "AI Data Scrapers",
+        "frequency": "Unclear at this time.",
+        "description": "Kangaroo Bot is used by the company Kangaroo LLM to download data to train AI models tailored to Australian language and culture. More info can be found at https://darkvisitors.com/agents/agents/kangaroo-bot"
+    },
+    "Meta-ExternalAgent": {
+        "operator": "[Meta](https://developers.facebook.com/docs/sharing/webmasters/web-crawlers)",
+        "respect": "Yes.",
+        "function": "Used to train models and improve products.",
+        "frequency": "No information.",
+        "description": "\"The Meta-ExternalAgent crawler crawls the web for use cases such as training AI models or improving products by indexing content directly.\""
+    },
+    "Meta-ExternalFetcher": {
+        "operator": "Unclear at this time.",
+        "respect": "Unclear at this time.",
+        "function": "AI Assistants",
+        "frequency": "Unclear at this time.",
+        "description": "Meta-ExternalFetcher is dispatched by Meta AI products in response to user prompts, when they need to fetch an individual link. More info can be found at https://darkvisitors.com/agents/agents/meta-externalfetcher"
+    },
+    "OAI-SearchBot": {
+        "operator": "[OpenAI](https://openai.com)",
+        "respect": "[Yes](https://platform.openai.com/docs/bots)",
+        "function": "Search result generation.",
+        "frequency": "No information.",
+        "description": "Crawls sites to surface as results in SearchGPT."
+    },
+    "omgili": {
+        "operator": "[Webz.io](https://webz.io/)",
+        "respect": "[Yes](https://webz.io/blog/web-data/what-is-the-omgili-bot-and-why-is-it-crawling-your-website/)",
+        "function": "Data is sold.",
+        "frequency": "No information.",
+        "description": "Crawls sites for APIs used by Hootsuite, Sprinklr, NetBase, and other companies. Data also sold for research purposes or LLM training."
+    },
+    "omgilibot": {
+        "description": "Legacy user agent initially used for Omgili search engine. Unknown if still used, `omgili` agent still used by Webz.io.",
+        "frequency": "No information.",
+        "function": "Data is sold.",
+        "operator": "[Webz.io](https://webz.io/)",
+        "respect": "[Yes](https://web.archive.org/web/20170704003301/http://omgili.com/Crawler.html)"
+    },
+    "PerplexityBot": {
+        "operator": "[Perplexity](https://www.perplexity.ai/)",
+        "respect": "[No](https://www.macstories.net/stories/wired-confirms-perplexity-is-bypassing-efforts-by-websites-to-block-its-web-crawler/)",
+        "function": "Used to answer queries at the request of users.",
+        "frequency": "Takes action based on user prompts.",
+        "description": "Operated by Perplexity to obtain results in response to user queries."
+    },
+    "PetalBot": {
+        "description": "Operated by Huawei to provide search and AI assistant services.",
+        "frequency": "No explicit frequency provided.",
+        "function": "Used to provide recommendations in Huawei assistant and AI search services.",
+        "operator": "[Huawei](https://huawei.com/)",
+        "respect": "Yes"
+    },
+    "Scrapy": {
+        "description": "\"AI and machine learning applications often need large amounts of quality data, and web data extraction is a fast, efficient way to build structured data sets.\"",
+        "frequency": "No information.",
+        "function": "Scrapes data for a variety of uses including training AI.",
+        "operator": "[Zyte](https://www.zyte.com)",
+        "respect": "Unclear at this time."
+    },
+    "Sidetrade indexer bot": {
+        "description": "AI product training.",
+        "frequency": "No information.",
+        "function": "Extracts data for a variety of uses including training AI.",
+        "operator": "[Sidetrade](https://www.sidetrade.com)",
+        "respect": "Unclear at this time."
+    },
+    "Timpibot": {
+        "operator": "[Timpi](https://timpi.io)",
+        "respect": "Unclear at this time.",
+        "function": "Scrapes data for use in training LLMs.",
+        "frequency": "No information.",
+        "description": "Makes data available for training AI models."
+    },
+    "VelenPublicWebCrawler": {
+        "description": "\"Our goal with this crawler is to build business datasets and machine learning models to better understand the web.\"",
+        "frequency": "No information.",
+        "function": "Scrapes data for business data sets and machine learning models.",
+        "operator": "[Velen Crawler](https://velen.io)",
+        "respect": "[Yes](https://velen.io)"
+    },
+    "Webzio-Extended": {
+        "operator": "Unclear at this time.",
+        "respect": "Unclear at this time.",
+        "function": "AI Data Scrapers",
+        "frequency": "Unclear at this time.",
+        "description": "Webzio-Extended is a web crawler used by Webz.io to maintain a repository of web crawl data that it sells to other companies, including those using it to train AI models. More info can be found at https://darkvisitors.com/agents/agents/webzio-extended"
+    },
+    "YouBot": {
+        "operator": "[You](https://about.you.com/youchat/)",
+        "respect": "[Yes](https://about.you.com/youbot/)",
+        "function": "Scrapes data for search engine and LLMs.",
+        "frequency": "No information.",
+        "description": "Retrieves data used for You.com web search engine and LLMs."
+    }
+}
\ No newline at end of file
diff --git a/code/test_files/robots.txt b/code/test_files/robots.txt
new file mode 100644
index 0000000..927f6f4
--- /dev/null
+++ b/code/test_files/robots.txt
@@ -0,0 +1,41 @@
+User-agent: AI2Bot
+User-agent: Ai2Bot-Dolma
+User-agent: Amazonbot
+User-agent: anthropic-ai
+User-agent: Applebot
+User-agent: Applebot-Extended
+User-agent: Bytespider
+User-agent: CCBot
+User-agent: ChatGPT-User
+User-agent: Claude-Web
+User-agent: ClaudeBot
+User-agent: cohere-ai
+User-agent: Diffbot
+User-agent: FacebookBot
+User-agent: facebookexternalhit
+User-agent: FriendlyCrawler
+User-agent: Google-Extended
+User-agent: GoogleOther
+User-agent: GoogleOther-Image
+User-agent: GoogleOther-Video
+User-agent: GPTBot
+User-agent: iaskspider/2.0
+User-agent: ICC-Crawler
+User-agent: ImagesiftBot
+User-agent: img2dataset
+User-agent: ISSCyberRiskCrawler
+User-agent: Kangaroo Bot
+User-agent: Meta-ExternalAgent
+User-agent: Meta-ExternalFetcher
+User-agent: OAI-SearchBot
+User-agent: omgili
+User-agent: omgilibot
+User-agent: PerplexityBot
+User-agent: PetalBot
+User-agent: Scrapy
+User-agent: Sidetrade indexer bot
+User-agent: Timpibot
+User-agent: VelenPublicWebCrawler
+User-agent: Webzio-Extended
+User-agent: YouBot
+Disallow: /
diff --git a/code/test_files/table-of-bot-metrics.md b/code/test_files/table-of-bot-metrics.md
new file mode 100644
index 0000000..257ba99
--- /dev/null
+++ b/code/test_files/table-of-bot-metrics.md
@@ -0,0 +1,42 @@
+| Name | Operator | Respects `robots.txt` | Data use | Visit regularity | Description |
+|-----|----------|-----------------------|----------|------------------|-------------|
+| AI2Bot | [Ai2](https://allenai.org/crawler) | Yes | Content is used to train open language models. | No information provided. | Explores 'certain domains' to find web content. |
+| Ai2Bot-Dolma | [Ai2](https://allenai.org/crawler) | Yes | Content is used to train open language models. | No information provided. | Explores 'certain domains' to find web content. |
+| Amazonbot | Amazon | Yes | Service improvement and enabling answers for Alexa users. | No information provided. | Includes references to crawled website when surfacing answers via Alexa; does not clearly outline other uses. |
+| anthropic-ai | [Anthropic](https://www.anthropic.com) | Unclear at this time. | Scrapes data to train Anthropic's AI products. | No information provided. | Scrapes data to train LLMs and AI products offered by Anthropic. |
+| Applebot | Unclear at this time. | Unclear at this time. | AI Search Crawlers | Unclear at this time. | Applebot is a web crawler used by Apple to index search results that allow the Siri AI Assistant to answer user questions. Siri's answers normally contain references to the website. More info can be found at https://darkvisitors.com/agents/agents/applebot |
+| Applebot-Extended | [Apple](https://support.apple.com/en-us/119829#datausage) | Yes | Powers features in Siri, Spotlight, Safari, Apple Intelligence, and others. | Unclear at this time. | Apple has a secondary user agent, Applebot-Extended ... [that is] used to train Apple's foundation models powering generative AI features across Apple products, including Apple Intelligence, Services, and Developer Tools. |
+| Bytespider | ByteDance | No | LLM training. | Unclear at this time. | Downloads data to train LLMs, including ChatGPT competitors. |
+| CCBot | [Common Crawl Foundation](https://commoncrawl.org) | [Yes](https://commoncrawl.org/ccbot) | Provides open crawl dataset, used for many purposes, including Machine Learning/AI. | Monthly at present. | Web archive going back to 2008. [Cited in thousands of research papers per year](https://commoncrawl.org/research-papers). |
+| ChatGPT-User | [OpenAI](https://openai.com) | Yes | Takes action based on user prompts. | Only when prompted by a user. | Used by plugins in ChatGPT to answer queries based on user input. |
+| Claude-Web | [Anthropic](https://www.anthropic.com) | Unclear at this time. | Scrapes data to train Anthropic's AI products. | No information provided. | Scrapes data to train LLMs and AI products offered by Anthropic. |
+| ClaudeBot | [Anthropic](https://www.anthropic.com) | [Yes](https://support.anthropic.com/en/articles/8896518-does-anthropic-crawl-data-from-the-web-and-how-can-site-owners-block-the-crawler) | Scrapes data to train Anthropic's AI products. | No information provided. | Scrapes data to train LLMs and AI products offered by Anthropic. |
+| cohere-ai | [Cohere](https://cohere.com) | Unclear at this time. | Retrieves data to provide responses to user-initiated prompts. | Takes action based on user prompts. | Retrieves data based on user prompts. |
+| Diffbot | [Diffbot](https://www.diffbot.com/) | At the discretion of Diffbot users. | Aggregates structured web data for monitoring and AI model training. | Unclear at this time. | Diffbot is an application used to parse web pages into structured data; this data is used for monitoring or AI model training. |
+| FacebookBot | Meta/Facebook | [Yes](https://developers.facebook.com/docs/sharing/bot/) | Training language models | Up to 1 page per second | Officially used for training Meta "speech recognition technology," unknown if used to train Meta AI specifically. |
+| facebookexternalhit | Meta/Facebook | [Yes](https://developers.facebook.com/docs/sharing/bot/) | No information. | Unclear at this time. | Unclear at this time. |
+| FriendlyCrawler | Unknown | [Yes](https://imho.alex-kunz.com/2024/01/25/an-update-on-friendly-crawler) | We are using the data from the crawler to build datasets for machine learning experiments. | Unclear at this time. | Unclear who the operator is; but data is used for training/machine learning. |
+| Google-Extended | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | LLM training. | No information. | Used to train Gemini and Vertex AI generative APIs. Does not impact a site's inclusion or ranking in Google Search. |
+| GoogleOther | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
+| GoogleOther-Image | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
+| GoogleOther-Video | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
+| GPTBot | [OpenAI](https://openai.com) | Yes | Scrapes data to train OpenAI's products. | No information. | Data is used to train current and future models, removed paywalled data, PII and data that violates the company's policies. |
+| iaskspider/2.0 | iAsk | No | Crawls sites to provide answers to user queries. | Unclear at this time. | Used to provide answers to user queries. |
+| ICC-Crawler | [NICT](https://nict.go.jp) | Yes | Scrapes data to train and support AI technologies. | No information. | Use the collected data for artificial intelligence technologies; provide data to third parties, including commercial companies; those companies can use the data for their own business. |
+| ImagesiftBot | [ImageSift](https://imagesift.com) | [Yes](https://imagesift.com/about) | ImageSiftBot is a web crawler that scrapes the internet for publicly available images to support our suite of web intelligence products | No information. | Once images and text are downloaded from a webpage, ImageSift analyzes this data from the page and stores the information in an index. Our web intelligence products use this index to enable search and retrieval of similar images. |
+| img2dataset | [img2dataset](https://github.com/rom1504/img2dataset) | Unclear at this time. | Scrapes images for use in LLMs. | At the discretion of img2dataset users. | Downloads large sets of images into datasets for LLM training or other purposes. |
+| ISSCyberRiskCrawler | [ISS-Corporate](https://iss-cyber.com) | No | Scrapes data to train machine learning models. | No information. | Used to train machine learning based models to quantify cyber risk. |
+| Kangaroo Bot | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Kangaroo Bot is used by the company Kangaroo LLM to download data to train AI models tailored to Australian language and culture. More info can be found at https://darkvisitors.com/agents/agents/kangaroo-bot |
+| Meta-ExternalAgent | [Meta](https://developers.facebook.com/docs/sharing/webmasters/web-crawlers) | Yes. | Used to train models and improve products. | No information. | "The Meta-ExternalAgent crawler crawls the web for use cases such as training AI models or improving products by indexing content directly." |
+| Meta-ExternalFetcher | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | Meta-ExternalFetcher is dispatched by Meta AI products in response to user prompts, when they need to fetch an individual link. More info can be found at https://darkvisitors.com/agents/agents/meta-externalfetcher |
+| OAI-SearchBot | [OpenAI](https://openai.com) | [Yes](https://platform.openai.com/docs/bots) | Search result generation. | No information. | Crawls sites to surface as results in SearchGPT. |
+| omgili | [Webz.io](https://webz.io/) | [Yes](https://webz.io/blog/web-data/what-is-the-omgili-bot-and-why-is-it-crawling-your-website/) | Data is sold. | No information. | Crawls sites for APIs used by Hootsuite, Sprinklr, NetBase, and other companies. Data also sold for research purposes or LLM training. |
+| omgilibot | [Webz.io](https://webz.io/) | [Yes](https://web.archive.org/web/20170704003301/http://omgili.com/Crawler.html) | Data is sold. | No information. | Legacy user agent initially used for Omgili search engine. Unknown if still used, `omgili` agent still used by Webz.io. |
+| PerplexityBot | [Perplexity](https://www.perplexity.ai/) | [No](https://www.macstories.net/stories/wired-confirms-perplexity-is-bypassing-efforts-by-websites-to-block-its-web-crawler/) | Used to answer queries at the request of users. | Takes action based on user prompts. | Operated by Perplexity to obtain results in response to user queries. |
+| PetalBot | [Huawei](https://huawei.com/) | Yes | Used to provide recommendations in Huawei assistant and AI search services. | No explicit frequency provided. | Operated by Huawei to provide search and AI assistant services. |
+| Scrapy | [Zyte](https://www.zyte.com) | Unclear at this time. | Scrapes data for a variety of uses including training AI. | No information. | "AI and machine learning applications often need large amounts of quality data, and web data extraction is a fast, efficient way to build structured data sets." |
+| Sidetrade indexer bot | [Sidetrade](https://www.sidetrade.com) | Unclear at this time. | Extracts data for a variety of uses including training AI. | No information. | AI product training. |
+| Timpibot | [Timpi](https://timpi.io) | Unclear at this time. | Scrapes data for use in training LLMs. | No information. | Makes data available for training AI models. |
+| VelenPublicWebCrawler | [Velen Crawler](https://velen.io) | [Yes](https://velen.io) | Scrapes data for business data sets and machine learning models. | No information. | "Our goal with this crawler is to build business datasets and machine learning models to better understand the web." |
+| Webzio-Extended | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Webzio-Extended is a web crawler used by Webz.io to maintain a repository of web crawl data that it sells to other companies, including those using it to train AI models. More info can be found at https://darkvisitors.com/agents/agents/webzio-extended |
+| YouBot | [You](https://about.you.com/youchat/) | [Yes](https://about.you.com/youbot/) | Scrapes data for search engine and LLMs. | No information. | Retrieves data used for You.com web search engine and LLMs. |
diff --git a/code/tests.py b/code/tests.py
new file mode 100644
index 0000000..ffa7574
--- /dev/null
+++ b/code/tests.py
@@ -0,0 +1,21 @@
+"""These tests can be run with pytest.
+This requires pytest: pip install pytest
+cd to the `code` directory and run `pytest`
+"""
+
+import json
+from pathlib import Path
+
+from dark_visitors import json_to_txt, json_to_table
+
+
+def test_robots_txt_creation():
+    robots_json = json.loads(Path("test_files/robots.json").read_text())
+    robots_txt = json_to_txt(robots_json)
+    assert Path("test_files/robots.txt").read_text() == robots_txt
+
+
+def test_table_of_bot_metrics_md():
+    robots_json = json.loads(Path("test_files/robots.json").read_text())
+    robots_table = json_to_table(robots_json)
+    assert Path("test_files/table-of-bot-metrics.md").read_text() == robots_table
diff --git a/robots.txt b/robots.txt
index 13681f3..927f6f4 100644
--- a/robots.txt
+++ b/robots.txt
@@ -38,4 +38,4 @@ User-agent: Timpibot
 User-agent: VelenPublicWebCrawler
 User-agent: Webzio-Extended
 User-agent: YouBot
-Disallow: /
\ No newline at end of file
+Disallow: /
diff --git a/table-of-bot-metrics.md b/table-of-bot-metrics.md
index 0e6884c..257ba99 100644
--- a/table-of-bot-metrics.md
+++ b/table-of-bot-metrics.md
@@ -1,42 +1,42 @@
 | Name | Operator | Respects `robots.txt` | Data use | Visit regularity | Description |
 |-----|----------|-----------------------|----------|------------------|-------------|
-| AI2Bot | [Ai2](https://allenai.org/crawler) | Yes | Content is used to train open language models. | No information provided. | Explores 'certain domains' to find web content. |
-| Ai2Bot-Dolma | [Ai2](https://allenai.org/crawler) | Yes | Content is used to train open language models. | No information provided. | Explores 'certain domains' to find web content. |
-| Amazonbot | Amazon | Yes | Service improvement and enabling answers for Alexa users. | No information provided. | Includes references to crawled website when surfacing answers via Alexa; does not clearly outline other uses. |
-| anthropic-ai | [Anthropic](https://www.anthropic.com) | Unclear at this time. | Scrapes data to train Anthropic's AI products. | No information provided. | Scrapes data to train LLMs and AI products offered by Anthropic. |
-| Applebot | Unclear at this time. | Unclear at this time. | AI Search Crawlers | Unclear at this time. | Applebot is a web crawler used by Apple to index search results that allow the Siri AI Assistant to answer user questions. Siri's answers normally contain references to the website. More info can be found at https://darkvisitors.com/agents/agents/applebot |
-| Applebot-Extended | [Apple](https://support.apple.com/en-us/119829#datausage) | Yes | Powers features in Siri, Spotlight, Safari, Apple Intelligence, and others. | Unclear at this time. | Apple has a secondary user agent, Applebot-Extended ... [that is] used to train Apple's foundation models powering generative AI features across Apple products, including Apple Intelligence, Services, and Developer Tools. |
-| Bytespider | ByteDance | No | LLM training. | Unclear at this time. | Downloads data to train LLMs, including ChatGPT competitors. |
-| CCBot | [Common Crawl Foundation](https://commoncrawl.org) | [Yes](https://commoncrawl.org/ccbot) | Provides open crawl dataset, used for many purposes, including Machine Learning/AI. | Monthly at present. | Web archive going back to 2008. [Cited in thousands of research papers per year](https://commoncrawl.org/research-papers). |
-| ChatGPT-User | [OpenAI](https://openai.com) | Yes | Takes action based on user prompts. | Only when prompted by a user. | Used by plugins in ChatGPT to answer queries based on user input. |
-| Claude-Web | [Anthropic](https://www.anthropic.com) | Unclear at this time. | Scrapes data to train Anthropic's AI products. | No information provided. | Scrapes data to train LLMs and AI products offered by Anthropic. |
-| ClaudeBot | [Anthropic](https://www.anthropic.com) | [Yes](https://support.anthropic.com/en/articles/8896518-does-anthropic-crawl-data-from-the-web-and-how-can-site-owners-block-the-crawler) | Scrapes data to train Anthropic's AI products. | No information provided. | Scrapes data to train LLMs and AI products offered by Anthropic. |
-| cohere-ai | [Cohere](https://cohere.com) | Unclear at this time. | Retrieves data to provide responses to user-initiated prompts. | Takes action based on user prompts. | Retrieves data based on user prompts. |
-| Diffbot | [Diffbot](https://www.diffbot.com/) | At the discretion of Diffbot users. | Aggregates structured web data for monitoring and AI model training. | Unclear at this time. | Diffbot is an application used to parse web pages into structured data; this data is used for monitoring or AI model training. |
-| FacebookBot | Meta/Facebook | [Yes](https://developers.facebook.com/docs/sharing/bot/) | Training language models | Up to 1 page per second | Officially used for training Meta "speech recognition technology," unknown if used to train Meta AI specifically. |
-| facebookexternalhit | Meta/Facebook | [Yes](https://developers.facebook.com/docs/sharing/bot/) | No information. | Unclear at this time. | Unclear at this time. |
-| FriendlyCrawler | Unknown | [Yes](https://imho.alex-kunz.com/2024/01/25/an-update-on-friendly-crawler) | We are using the data from the crawler to build datasets for machine learning experiments. | Unclear at this time. | Unclear who the operator is; but data is used for training/machine learning. |
-| Google-Extended | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | LLM training. | No information. | Used to train Gemini and Vertex AI generative APIs. Does not impact a site's inclusion or ranking in Google Search. |
-| GoogleOther | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
-| GoogleOther-Image | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
-| GoogleOther-Video | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
-| GPTBot | [OpenAI](https://openai.com) | Yes | Scrapes data to train OpenAI's products. | No information. | Data is used to train current and future models, removed paywalled data, PII and data that violates the company's policies. |
-| iaskspider/2.0 | iAsk | No | Crawls sites to provide answers to user queries. | Unclear at this time. | Used to provide answers to user queries. |
-| ICC-Crawler | [NICT](https://nict.go.jp) | Yes | Scrapes data to train and support AI technologies. | No information. | Use the collected data for artificial intelligence technologies; provide data to third parties, including commercial companies; those companies can use the data for their own business. |
-| ImagesiftBot | [ImageSift](https://imagesift.com) | [Yes](https://imagesift.com/about) | ImageSiftBot is a web crawler that scrapes the internet for publicly available images to support our suite of web intelligence products | No information. | Once images and text are downloaded from a webpage, ImageSift analyzes this data from the page and stores the information in an index. Our web intelligence products use this index to enable search and retrieval of similar images. |
-| img2dataset | [img2dataset](https://github.com/rom1504/img2dataset) | Unclear at this time. | Scrapes images for use in LLMs. | At the discretion of img2dataset users. | Downloads large sets of images into datasets for LLM training or other purposes. |
-| ISSCyberRiskCrawler | [ISS-Corporate](https://iss-cyber.com) | No | Scrapes data to train machine learning models. | No information. | Used to train machine learning based models to quantify cyber risk. |
-| Kangaroo Bot | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Kangaroo Bot is used by the company Kangaroo LLM to download data to train AI models tailored to Australian language and culture. More info can be found at https://darkvisitors.com/agents/agents/kangaroo-bot |
-| Meta-ExternalAgent | [Meta](https://developers.facebook.com/docs/sharing/webmasters/web-crawlers) | Yes. | Used to train models and improve products. | No information. | "The Meta-ExternalAgent crawler crawls the web for use cases such as training AI models or improving products by indexing content directly." |
-| Meta-ExternalFetcher | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | Meta-ExternalFetcher is dispatched by Meta AI products in response to user prompts, when they need to fetch an individual link. More info can be found at https://darkvisitors.com/agents/agents/meta-externalfetcher |
-| OAI-SearchBot | [OpenAI](https://openai.com) | [Yes](https://platform.openai.com/docs/bots) | Search result generation. | No information. | Crawls sites to surface as results in SearchGPT. |
-| omgili | [Webz.io](https://webz.io/) | [Yes](https://webz.io/blog/web-data/what-is-the-omgili-bot-and-why-is-it-crawling-your-website/) | Data is sold. | No information. | Crawls sites for APIs used by Hootsuite, Sprinklr, NetBase, and other companies. Data also sold for research purposes or LLM training. |
-| omgilibot | [Webz.io](https://webz.io/) | [Yes](https://web.archive.org/web/20170704003301/http://omgili.com/Crawler.html) | Data is sold. | No information. | Legacy user agent initially used for Omgili search engine. Unknown if still used, `omgili` agent still used by Webz.io. |
-| PerplexityBot | [Perplexity](https://www.perplexity.ai/) | [No](https://www.macstories.net/stories/wired-confirms-perplexity-is-bypassing-efforts-by-websites-to-block-its-web-crawler/) | Used to answer queries at the request of users. | Takes action based on user prompts. | Operated by Perplexity to obtain results in response to user queries. |
-| PetalBot | [Huawei](https://huawei.com/) | Yes | Used to provide recommendations in Huawei assistant and AI search services. | No explicit frequency provided. | Operated by Huawei to provide search and AI assistant services. |
-| Scrapy | [Zyte](https://www.zyte.com) | Unclear at this time. | Scrapes data for a variety of uses including training AI. | No information. | "AI and machine learning applications often need large amounts of quality data, and web data extraction is a fast, efficient way to build structured data sets." |
-| Sidetrade indexer bot | [Sidetrade](https://www.sidetrade.com) | Unclear at this time. | Extracts data for a variety of uses including training AI. | No information. | AI product training. |
-| Timpibot | [Timpi](https://timpi.io) | Unclear at this time. | Scrapes data for use in training LLMs. | No information. | Makes data available for training AI models. |
-| VelenPublicWebCrawler | [Velen Crawler](https://velen.io) | [Yes](https://velen.io) | Scrapes data for business data sets and machine learning models. | No information. | "Our goal with this crawler is to build business datasets and machine learning models to better understand the web." |
-| Webzio-Extended | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Webzio-Extended is a web crawler used by Webz.io to maintain a repository of web crawl data that it sells to other companies, including those using it to train AI models. More info can be found at https://darkvisitors.com/agents/agents/webzio-extended |
-| YouBot | [You](https://about.you.com/youchat/) | [Yes](https://about.you.com/youbot/) | Scrapes data for search engine and LLMs. | No information. | Retrieves data used for You.com web search engine and LLMs. |
+| AI2Bot | [Ai2](https://allenai.org/crawler) | Yes | Content is used to train open language models. | No information provided. | Explores 'certain domains' to find web content. |
+| Ai2Bot-Dolma | [Ai2](https://allenai.org/crawler) | Yes | Content is used to train open language models. | No information provided. | Explores 'certain domains' to find web content. |
+| Amazonbot | Amazon | Yes | Service improvement and enabling answers for Alexa users. | No information provided. | Includes references to crawled website when surfacing answers via Alexa; does not clearly outline other uses. |
+| anthropic-ai | [Anthropic](https://www.anthropic.com) | Unclear at this time. | Scrapes data to train Anthropic's AI products. | No information provided. | Scrapes data to train LLMs and AI products offered by Anthropic. |
+| Applebot | Unclear at this time. | Unclear at this time. | AI Search Crawlers | Unclear at this time. | Applebot is a web crawler used by Apple to index search results that allow the Siri AI Assistant to answer user questions. Siri's answers normally contain references to the website. More info can be found at https://darkvisitors.com/agents/agents/applebot |
+| Applebot-Extended | [Apple](https://support.apple.com/en-us/119829#datausage) | Yes | Powers features in Siri, Spotlight, Safari, Apple Intelligence, and others. | Unclear at this time. | Apple has a secondary user agent, Applebot-Extended ... [that is] used to train Apple's foundation models powering generative AI features across Apple products, including Apple Intelligence, Services, and Developer Tools. |
+| Bytespider | ByteDance | No | LLM training. | Unclear at this time. | Downloads data to train LLMs, including ChatGPT competitors. |
+| CCBot | [Common Crawl Foundation](https://commoncrawl.org) | [Yes](https://commoncrawl.org/ccbot) | Provides open crawl dataset, used for many purposes, including Machine Learning/AI. | Monthly at present. | Web archive going back to 2008. [Cited in thousands of research papers per year](https://commoncrawl.org/research-papers). |
+| ChatGPT-User | [OpenAI](https://openai.com) | Yes | Takes action based on user prompts. | Only when prompted by a user. | Used by plugins in ChatGPT to answer queries based on user input. |
+| Claude-Web | [Anthropic](https://www.anthropic.com) | Unclear at this time. | Scrapes data to train Anthropic's AI products. | No information provided. | Scrapes data to train LLMs and AI products offered by Anthropic. |
+| ClaudeBot | [Anthropic](https://www.anthropic.com) | [Yes](https://support.anthropic.com/en/articles/8896518-does-anthropic-crawl-data-from-the-web-and-how-can-site-owners-block-the-crawler) | Scrapes data to train Anthropic's AI products. | No information provided. | Scrapes data to train LLMs and AI products offered by Anthropic. |
+| cohere-ai | [Cohere](https://cohere.com) | Unclear at this time. | Retrieves data to provide responses to user-initiated prompts. | Takes action based on user prompts. | Retrieves data based on user prompts. |
+| Diffbot | [Diffbot](https://www.diffbot.com/) | At the discretion of Diffbot users. | Aggregates structured web data for monitoring and AI model training. | Unclear at this time. | Diffbot is an application used to parse web pages into structured data; this data is used for monitoring or AI model training. |
+| FacebookBot | Meta/Facebook | [Yes](https://developers.facebook.com/docs/sharing/bot/) | Training language models | Up to 1 page per second | Officially used for training Meta "speech recognition technology," unknown if used to train Meta AI specifically. |
+| facebookexternalhit | Meta/Facebook | [Yes](https://developers.facebook.com/docs/sharing/bot/) | No information. | Unclear at this time. | Unclear at this time. |
+| FriendlyCrawler | Unknown | [Yes](https://imho.alex-kunz.com/2024/01/25/an-update-on-friendly-crawler) | We are using the data from the crawler to build datasets for machine learning experiments. | Unclear at this time. | Unclear who the operator is; but data is used for training/machine learning. |
+| Google-Extended | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | LLM training. | No information. | Used to train Gemini and Vertex AI generative APIs. Does not impact a site's inclusion or ranking in Google Search. |
+| GoogleOther | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
+| GoogleOther-Image | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
+| GoogleOther-Video | Google | [Yes](https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers) | Scrapes data. | No information. | "Used by various product teams for fetching publicly accessible content from sites. For example, it may be used for one-off crawls for internal research and development." |
+| GPTBot | [OpenAI](https://openai.com) | Yes | Scrapes data to train OpenAI's products. | No information. | Data is used to train current and future models, removed paywalled data, PII and data that violates the company's policies. |
+| iaskspider/2.0 | iAsk | No | Crawls sites to provide answers to user queries. | Unclear at this time. | Used to provide answers to user queries. |
+| ICC-Crawler | [NICT](https://nict.go.jp) | Yes | Scrapes data to train and support AI technologies. | No information. | Use the collected data for artificial intelligence technologies; provide data to third parties, including commercial companies; those companies can use the data for their own business. |
+| ImagesiftBot | [ImageSift](https://imagesift.com) | [Yes](https://imagesift.com/about) | ImageSiftBot is a web crawler that scrapes the internet for publicly available images to support our suite of web intelligence products | No information. | Once images and text are downloaded from a webpage, ImageSift analyzes this data from the page and stores the information in an index. Our web intelligence products use this index to enable search and retrieval of similar images. |
+| img2dataset | [img2dataset](https://github.com/rom1504/img2dataset) | Unclear at this time. | Scrapes images for use in LLMs. | At the discretion of img2dataset users. | Downloads large sets of images into datasets for LLM training or other purposes. |
+| ISSCyberRiskCrawler | [ISS-Corporate](https://iss-cyber.com) | No | Scrapes data to train machine learning models. | No information. | Used to train machine learning based models to quantify cyber risk. |
+| Kangaroo Bot | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Kangaroo Bot is used by the company Kangaroo LLM to download data to train AI models tailored to Australian language and culture. More info can be found at https://darkvisitors.com/agents/agents/kangaroo-bot |
+| Meta-ExternalAgent | [Meta](https://developers.facebook.com/docs/sharing/webmasters/web-crawlers) | Yes. | Used to train models and improve products. | No information. | "The Meta-ExternalAgent crawler crawls the web for use cases such as training AI models or improving products by indexing content directly." |
+| Meta-ExternalFetcher | Unclear at this time. | Unclear at this time. | AI Assistants | Unclear at this time. | Meta-ExternalFetcher is dispatched by Meta AI products in response to user prompts, when they need to fetch an individual link. More info can be found at https://darkvisitors.com/agents/agents/meta-externalfetcher |
+| OAI-SearchBot | [OpenAI](https://openai.com) | [Yes](https://platform.openai.com/docs/bots) | Search result generation. | No information. | Crawls sites to surface as results in SearchGPT. |
+| omgili | [Webz.io](https://webz.io/) | [Yes](https://webz.io/blog/web-data/what-is-the-omgili-bot-and-why-is-it-crawling-your-website/) | Data is sold. | No information. | Crawls sites for APIs used by Hootsuite, Sprinklr, NetBase, and other companies. Data also sold for research purposes or LLM training. |
+| omgilibot | [Webz.io](https://webz.io/) | [Yes](https://web.archive.org/web/20170704003301/http://omgili.com/Crawler.html) | Data is sold. | No information. | Legacy user agent initially used for Omgili search engine. Unknown if still used, `omgili` agent still used by Webz.io. |
+| PerplexityBot | [Perplexity](https://www.perplexity.ai/) | [No](https://www.macstories.net/stories/wired-confirms-perplexity-is-bypassing-efforts-by-websites-to-block-its-web-crawler/) | Used to answer queries at the request of users. | Takes action based on user prompts. | Operated by Perplexity to obtain results in response to user queries. |
| Takes action based on user prompts. | Operated by Perplexity to obtain results in response to user queries. | +| PetalBot | [Huawei](https://huawei.com/) | Yes | Used to provide recommendations in Hauwei assistant and AI search services. | No explicit frequency provided. | Operated by Huawei to provide search and AI assistant services. | +| Scrapy | [Zyte](https://www.zyte.com) | Unclear at this time. | Scrapes data for a variety of uses including training AI. | No information. | "AI and machine learning applications often need large amounts of quality data, and web data extraction is a fast, efficient way to build structured data sets." | +| Sidetrade indexer bot | [Sidetrade](https://www.sidetrade.com) | Unclear at this time. | Extracts data for a variety of uses including training AI. | No information. | AI product training. | +| Timpibot | [Timpi](https://timpi.io) | Unclear at this time. | Scrapes data for use in training LLMs. | No information. | Makes data available for training AI models. | +| VelenPublicWebCrawler | [Velen Crawler](https://velen.io) | [Yes](https://velen.io) | Scrapes data for business data sets and machine learning models. | No information. | "Our goal with this crawler is to build business datasets and machine learning models to better understand the web." | +| Webzio-Extended | Unclear at this time. | Unclear at this time. | AI Data Scrapers | Unclear at this time. | Webzio-Extended is a web crawler used by Webz.io to maintain a repository of web crawl data that it sells to other companies, including those using it to train AI models. More info can be found at https://darkvisitors.com/agents/agents/webzio-extended | +| YouBot | [You](https://about.you.com/youchat/) | [Yes](https://about.you.com/youbot/) | Scrapes data for search engine and LLMs. | No information. | Retrieves data used for You.com web search engine and LLMs. 
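
Each row in the fixture above is rendered from a single robots.json entry, and the same entries drive the generated robots.txt. A minimal sketch of that correspondence, using a made-up agent name (the entry keys mirror the table columns; the robots.txt composition follows the json_to_txt logic in patch 4 below):

# A hypothetical robots.json entry; "ExampleBot" and its values are
# invented for illustration only.
robots_json = {
    "ExampleBot": {
        "operator": "[Example Corp](https://example.com)",
        "respect": "Unclear at this time.",
        "function": "AI Data Scrapers",
        "frequency": "Unclear at this time.",
        "description": "Hypothetical agent used to illustrate the format.",
    }
}

# Same composition as json_to_txt: one User-agent line per entry,
# followed by a single Disallow rule covering all of them.
robots_txt = "\n".join(f"User-agent: {k}" for k in robots_json.keys())
robots_txt += "\nDisallow: /\n"
print(robots_txt)
# User-agent: ExampleBot
# Disallow: /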

From 7e2b3ab0372080ba885a6c1969d8101135f6bae8 Mon Sep 17 00:00:00 2001
From: fabianegli
Date: Sat, 19 Oct 2024 19:09:34 +0200
Subject: [PATCH 3/5] rename action

---
 .github/workflows/{daily_update.yml => ai_robots_update.yml} | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
 rename .github/workflows/{daily_update.yml => ai_robots_update.yml} (95%)

diff --git a/.github/workflows/daily_update.yml b/.github/workflows/ai_robots_update.yml
similarity index 95%
rename from .github/workflows/daily_update.yml
rename to .github/workflows/ai_robots_update.yml
index 11eeab3..ea5c760 100644
--- a/.github/workflows/daily_update.yml
+++ b/.github/workflows/ai_robots_update.yml
@@ -1,4 +1,4 @@
-name: Daily Update from Dark Visitors
+name: Updates for AI robots files
 on:
   push:
     branches:

From 6ab8fb2d37082f524ca7a5d724669e8175e9f94f Mon Sep 17 00:00:00 2001
From: fabianegli
Date: Sat, 19 Oct 2024 19:11:01 +0200
Subject: [PATCH 4/5] no more failure when run without network

---
 code/dark_visitors.py | 69 +++++++++++++++++++++++++++++++++++++------
 1 file changed, 60 insertions(+), 9 deletions(-)

diff --git a/code/dark_visitors.py b/code/dark_visitors.py
index 838ce67..820c9c1 100644
--- a/code/dark_visitors.py
+++ b/code/dark_visitors.py
@@ -5,12 +5,27 @@
 from bs4 import BeautifulSoup
 
 
-def get_updated_robots_json():
-    session = requests.Session()
-    response = session.get("https://darkvisitors.com/agents")
-    soup = BeautifulSoup(response.text, "html.parser")
+def load_robots_json():
+    """Load the robots.json contents into a dictionary."""
+    return json.loads(Path("./robots.json").read_text(encoding="utf-8"))
+
 
-    existing_content = json.loads(Path("./robots.json").read_text())
+def get_agent_soup():
+    """Retrieve current known agents from darkvisitors.com"""
+    session = requests.Session()
+    try:
+        response = session.get("https://darkvisitors.com/agents")
+    except requests.exceptions.ConnectionError:
+        print(
+            "ERROR: Could not gather the current agents from https://darkvisitors.com/agents"
+        )
+        return
+    return BeautifulSoup(response.text, "html.parser")
+
+
+def updated_robots_json(soup):
+    """Update AI scraper information with data from darkvisitors."""
+    existing_content = load_robots_json()
     to_include = [
         "AI Assistants",
         "AI Data Scrapers",
@@ -83,13 +98,31 @@ def consolidate(field: str, value: str) -> str:
     return sorted_robots
 
 
+def ingest_darkvisitors():
+
+    old_robots_json = load_robots_json()
+    soup = get_agent_soup()
+    if soup:
+        robots_json = updated_robots_json(soup)
+        print(
+            "robots.json is unchanged."
+            if robots_json == old_robots_json
+            else "robots.json got updates."
+        )
+        Path("./robots.json").write_text(
+            json.dumps(robots_json, indent=4), encoding="utf-8"
+        )
+
+
 def json_to_txt(robots_json):
+    """Compose the robots.txt from the robots.json file."""
     robots_txt = "\n".join(f"User-agent: {k}" for k in robots_json.keys())
     robots_txt += "\nDisallow: /\n"
     return robots_txt
 
 
 def json_to_table(robots_json):
+    """Compose a markdown table with the information in robots.json"""
     table = "| Name | Operator | Respects `robots.txt` | Data use | Visit regularity | Description |\n"
     table += "|-----|----------|-----------------------|----------|------------------|-------------|\n"
 
@@ -99,8 +132,26 @@ def json_to_table(robots_json):
     return table
 
 
+def update_file_if_changed(file_name, converter):
+    """Update files if newer content is available and log the (in)actions."""
+    new_content = converter(load_robots_json())
+    old_content = Path(file_name).read_text(encoding="utf-8")
+    if old_content == new_content:
+        print(f"{file_name} is already up to date.")
+    else:
+        Path(file_name).write_text(new_content, encoding="utf-8")
+        print(f"{file_name} has been updated.")
+
+
+def conversions():
+    """Triggers the conversions from the json file."""
+    update_file_if_changed(file_name="./robots.txt", converter=json_to_txt)
+    update_file_if_changed(
+        file_name="./table-of-bot-metrics.md",
+        converter=json_to_table,
+    )
+
+
 if __name__ == "__main__":
-    robots_json = get_updated_robots_json()
-    Path("./robots.json").write_text(json.dumps(robots_json, indent=4))
-    Path("./robots.txt").write_text(json_to_txt(robots_json))
-    Path("./table-of-bot-metrics.md").write_text(json_to_table(robots_json))
+    ingest_darkvisitors()
+    conversions()
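
Patch 4's offline behavior can be exercised without unplugging the network. A quick check, not part of the patch series itself: it assumes beautifulsoup4 and requests are installed and that code/dark_visitors.py is importable (for example by running from the code/ directory):

from unittest import mock

import requests

import dark_visitors  # assumes code/ is on sys.path

# Force Session.get to raise, as it would on a machine without network
# access. get_agent_soup() should print its ERROR message and return
# None instead of raising, so callers can skip the update cleanly.
with mock.patch.object(
    requests.Session,
    "get",
    side_effect=requests.exceptions.ConnectionError,
):
    assert dark_visitors.get_agent_soup() is None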

From 3ab22bc49887325dde1ce74d0b5952fcef87e2ea Mon Sep 17 00:00:00 2001
From: fabianegli
Date: Sat, 19 Oct 2024 19:56:41 +0200
Subject: [PATCH 5/5] make conversions and updates separately triggerable

---
 .github/workflows/ai_robots_update.yml | 13 ++++++++---
 code/dark_visitors.py                  | 29 +++++++++++++++++++++++--
 2 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/ai_robots_update.yml b/.github/workflows/ai_robots_update.yml
index ea5c760..b346e10 100644
--- a/.github/workflows/ai_robots_update.yml
+++ b/.github/workflows/ai_robots_update.yml
@@ -18,10 +18,17 @@ jobs:
           pip install beautifulsoup4 requests
           git config --global user.name "dark-visitors"
           git config --global user.email "dark-visitors@users.noreply.github.com"
-          echo "Running update script ..."
-          python code/dark_visitors.py
+          echo "Updating robots.json with data from darkvisitors.com ..."
+          python code/dark_visitors.py --update
           echo "... done."
           git --no-pager diff
           git add -A
-          git diff --quiet && git diff --staged --quiet || (git commit -m "Daily update from Dark Visitors" && git push)
+          git diff --quiet && git diff --staged --quiet || (git commit -m "Update from Dark Visitors" && git push)
+
+          echo "Updating robots.txt and table-of-bot-metrics.md if necessary ..."
+          python code/dark_visitors.py --convert
+          echo "... done."
+          git --no-pager diff
+          git add -A
+          git diff --quiet && git diff --staged --quiet || (git commit -m "Updated from new robots.json" && git push)
         shell: bash
diff --git a/code/dark_visitors.py b/code/dark_visitors.py
index 820c9c1..cf44e8e 100644
--- a/code/dark_visitors.py
+++ b/code/dark_visitors.py
@@ -153,5 +153,30 @@ def conversions():
 
 
 if __name__ == "__main__":
-    ingest_darkvisitors()
-    conversions()
+    import argparse
+
+    parser = argparse.ArgumentParser(
+        prog="ai-robots",
+        description="Collects and updates information about web scrapers of AI companies.",
+        epilog="One of the flags must be set.\n",
+    )
+    parser.add_argument(
+        "--update",
+        action="store_true",
+        help="Update the robots.json file with data from darkvisitors.com/agents",
+    )
+    parser.add_argument(
+        "--convert",
+        action="store_true",
+        help="Create the robots.txt and markdown table from robots.json",
+    )
+    args = parser.parse_args()
+
+    if not (args.update or args.convert):
+        print("ERROR: please provide one of the possible flags.")
+        parser.print_help()
+
+    if args.update:
+        ingest_darkvisitors()
+    if args.convert:
+        conversions()
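
With the final patch applied, the two stages are invoked explicitly rather than running together. A usage sketch, assuming it is run from the repository root (the printed lines are illustrative of update_file_if_changed's logging, not captured output):

import subprocess
import sys

# --update refreshes robots.json from darkvisitors.com; --convert
# regenerates robots.txt and table-of-bot-metrics.md from robots.json
# without touching the network. Running with no flag prints the help.
result = subprocess.run(
    [sys.executable, "code/dark_visitors.py", "--convert"],
    capture_output=True,
    text=True,
    check=True,
)
print(result.stdout, end="")
# e.g. ./robots.txt is already up to date.
#      ./table-of-bot-metrics.md is already up to date.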