From 719ca63ec126e270334b359a54c217e995dcca31 Mon Sep 17 00:00:00 2001
From: Pavel
Date: Thu, 12 Oct 2023 19:40:23 +0400
Subject: [PATCH] fixes

---
 application/parser/remote/crawler_loader.py | 64 ++++++++++---------
 application/parser/remote/remote_creator.py |  8 +--
 application/parser/remote/sitemap_loader.py | 71 +++++++++++++++++----
 application/parser/remote/web_loader.py     | 14 +++-
 4 files changed, 110 insertions(+), 47 deletions(-)

diff --git a/application/parser/remote/crawler_loader.py b/application/parser/remote/crawler_loader.py
index 2364dc278..380a25bf4 100644
--- a/application/parser/remote/crawler_loader.py
+++ b/application/parser/remote/crawler_loader.py
@@ -6,47 +6,53 @@ class CrawlerLoader(BaseRemote):
     def __init__(self, limit=10):
         from langchain.document_loaders import WebBaseLoader
-        self.loader = WebBaseLoader
-        #No pages scraped limit, set None for no limit
-        self.limit = limit
+        self.loader = WebBaseLoader  # Initialize the document loader
+        self.limit = limit  # Set the limit for the number of pages to scrape
 
     def load_data(self, url):
-        # Create a set to store visited URLs to avoid revisiting the same page
-        visited_urls = set()
+        # Check if the input is a list and if it is, use the first element
+        if isinstance(url, list) and url:
+            url = url[0]
 
-        # Extract the base URL to ensure we only fetch URLs from the same domain
-        base_url = urlparse(url).scheme + "://" + urlparse(url).hostname
+        # Check if the URL scheme is provided, if not, assume http
+        if not urlparse(url).scheme:
+            url = "http://" + url
 
-        # Initialize a list with the initial URL
-        urls_to_visit = [url]
+        visited_urls = set()  # Keep track of URLs that have been visited
+        base_url = urlparse(url).scheme + "://" + urlparse(url).hostname  # Extract the base URL
+        urls_to_visit = [url]  # List of URLs to be visited, starting with the initial URL
+        loaded_content = []  # Store the loaded content from each URL
 
+        # Continue crawling until there are no more URLs to visit
         while urls_to_visit:
-            current_url = urls_to_visit.pop(0)
-            visited_urls.add(current_url)
-
-            # Fetch the content of the current URL
-            response = requests.get(current_url)
-            if response.status_code != 200:
-                print(f"Failed to fetch URL: {current_url}")
+            current_url = urls_to_visit.pop(0)  # Get the next URL to visit
+            visited_urls.add(current_url)  # Mark the URL as visited
+
+            # Try to load and process the content from the current URL
+            try:
+                response = requests.get(current_url)  # Fetch the content of the current URL
+                response.raise_for_status()  # Raise an exception for HTTP errors
+                loader = self.loader([current_url])  # Initialize the document loader for the current URL
+                loaded_content.extend(loader.load())  # Load the content and add it to the loaded_content list
+            except Exception as e:
+                # Print an error message if loading or processing fails and continue with the next URL
+                print(f"Error processing URL {current_url}: {e}")
                 continue
 
-            # Parse the HTML content
+            # Parse the HTML content to extract all links
             soup = BeautifulSoup(response.text, 'html.parser')
+            all_links = [
+                urljoin(current_url, a['href'])
+                for a in soup.find_all('a', href=True)
+                if base_url in urljoin(current_url, a['href'])  # Ensure links are from the same domain
+            ]
 
-            # Extract all links from the HTML content
-            all_links = [urljoin(current_url, a['href']) for a in soup.find_all('a', href=True) if base_url in urljoin(current_url, a['href'])]
-
-            # Add the new links to the urls_to_visit list if they haven't been visited yet
+            # Add new links to the list of URLs to visit if they haven't been visited yet
             urls_to_visit.extend([link for link in all_links if link not in visited_urls])
+            urls_to_visit = list(set(urls_to_visit))  # Remove duplicate URLs
 
-            # Remove duplicates
-            urls_to_visit = list(set(urls_to_visit))
-
-            # Stop if the limit is reached
+            # Stop crawling if the limit of pages to scrape is reached
             if self.limit is not None and len(visited_urls) >= self.limit:
                 break
 
-        #TODO: Optimize this section to parse pages as they are being crawled
-        loaded_content = self.loader(list(visited_urls)).load()
-
-        return loaded_content
\ No newline at end of file
+        return loaded_content  # Return the loaded content from all visited URLs
diff --git a/application/parser/remote/remote_creator.py b/application/parser/remote/remote_creator.py
index e12b7a02a..e45333d4a 100644
--- a/application/parser/remote/remote_creator.py
+++ b/application/parser/remote/remote_creator.py
@@ -1,13 +1,13 @@
-# from sitemap_loader import SitemapLoader
-# from crawler_loader import CrawlerLoader
+from application.parser.remote.sitemap_loader import SitemapLoader
+from application.parser.remote.crawler_loader import CrawlerLoader
 from application.parser.remote.web_loader import WebLoader
 
 
 class RemoteCreator:
     loaders = {
         'url': WebLoader,
-        # 'sitemap': SitemapLoader,
-        # 'crawler': CrawlerLoader
+        'sitemap': SitemapLoader,
+        'crawler': CrawlerLoader
     }
 
     @classmethod
diff --git a/application/parser/remote/sitemap_loader.py b/application/parser/remote/sitemap_loader.py
index 366d81ed8..0a3f4d4c0 100644
--- a/application/parser/remote/sitemap_loader.py
+++ b/application/parser/remote/sitemap_loader.py
@@ -1,27 +1,74 @@
 import requests
+import re  # Import regular expression library
 import xml.etree.ElementTree as ET
 from application.parser.remote.base import BaseRemote
 
 
 class SitemapLoader(BaseRemote):
-    def __init__(self):
+    def __init__(self, limit=20):
         from langchain.document_loaders import WebBaseLoader
         self.loader = WebBaseLoader
+        self.limit = limit  # Adding limit to control the number of URLs to process
 
     def load_data(self, sitemap_url):
-        # Fetch the sitemap content
+        urls = self._extract_urls(sitemap_url)
+        if not urls:
+            print(f"No URLs found in the sitemap: {sitemap_url}")
+            return []
+
+        # Load content of extracted URLs
+        documents = []
+        processed_urls = 0  # Counter for processed URLs
+        for url in urls:
+            if self.limit is not None and processed_urls >= self.limit:
+                break  # Stop processing if the limit is reached
+
+            try:
+                loader = self.loader([url])
+                documents.extend(loader.load())
+                processed_urls += 1  # Increment the counter after processing each URL
+            except Exception as e:
+                print(f"Error processing URL {url}: {e}")
+                continue
+
+        return documents
+
+    def _extract_urls(self, sitemap_url):
         response = requests.get(sitemap_url)
         if response.status_code != 200:
             print(f"Failed to fetch sitemap: {sitemap_url}")
-            return None
+            return []
+
+        # Determine if this is a sitemap or a URL
+        if self._is_sitemap(response):
+            # It's a sitemap, so parse it and extract URLs
+            return self._parse_sitemap(response.content)
+        else:
+            # It's not a sitemap, return the URL itself
+            return [sitemap_url]
+
+    def _is_sitemap(self, response):
+        content_type = response.headers.get('Content-Type', '')
+        if 'xml' in content_type or response.url.endswith('.xml'):
+            return True
+
+        if '
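
The following is a minimal usage sketch, not part of the patch, showing how the loaders changed above might be exercised. It assumes only the constructor signatures and load_data methods visible in this diff; the URLs are placeholders and the langchain WebBaseLoader dependency is assumed to be installed.

    # Sketch only: exercises CrawlerLoader and SitemapLoader as defined in this patch.
    # The URLs below are placeholders for illustration.
    from application.parser.remote.crawler_loader import CrawlerLoader
    from application.parser.remote.sitemap_loader import SitemapLoader

    crawler = CrawlerLoader(limit=5)                    # crawl at most 5 same-domain pages
    crawl_docs = crawler.load_data("example.com")       # "http://" is prepended when no scheme is given

    sitemap = SitemapLoader(limit=20)                   # process at most 20 URLs from the sitemap
    sitemap_docs = sitemap.load_data("https://example.com/sitemap.xml")

    print(len(crawl_docs), len(sitemap_docs))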