Commit

fixes
pabik committed Oct 12, 2023
1 parent 2cfb416 commit 719ca63
Showing 4 changed files with 110 additions and 47 deletions.
64 changes: 35 additions & 29 deletions application/parser/remote/crawler_loader.py
@@ -6,47 +6,53 @@
class CrawlerLoader(BaseRemote):
    def __init__(self, limit=10):
        from langchain.document_loaders import WebBaseLoader
        self.loader = WebBaseLoader  # Initialize the document loader
        self.limit = limit  # Limit on the number of pages to scrape; set to None for no limit

    def load_data(self, url):
        # If the input is a non-empty list, use its first element
        if isinstance(url, list) and url:
            url = url[0]

        # If no URL scheme is provided, assume http
        if not urlparse(url).scheme:
            url = "http://" + url

        visited_urls = set()  # Keep track of URLs that have been visited
        base_url = urlparse(url).scheme + "://" + urlparse(url).hostname  # Extract the base URL
        urls_to_visit = [url]  # List of URLs to be visited, starting with the initial URL
        loaded_content = []  # Store the loaded content from each URL

        # Continue crawling until there are no more URLs to visit
        while urls_to_visit:
            current_url = urls_to_visit.pop(0)  # Get the next URL to visit
            visited_urls.add(current_url)  # Mark the URL as visited

            # Try to load and process the content from the current URL
            try:
                response = requests.get(current_url)  # Fetch the content of the current URL
                response.raise_for_status()  # Raise an exception for HTTP errors
                loader = self.loader([current_url])  # Initialize the document loader for the current URL
                loaded_content.extend(loader.load())  # Load the content and add it to the loaded_content list
            except Exception as e:
                # Print an error message if loading or processing fails and continue with the next URL
                print(f"Error processing URL {current_url}: {e}")
                continue

            # Parse the HTML content to extract all links
            soup = BeautifulSoup(response.text, 'html.parser')
            all_links = [
                urljoin(current_url, a['href'])
                for a in soup.find_all('a', href=True)
                if base_url in urljoin(current_url, a['href'])  # Ensure links are from the same domain
            ]

            # Add new links to the list of URLs to visit if they haven't been visited yet
            urls_to_visit.extend([link for link in all_links if link not in visited_urls])
            urls_to_visit = list(set(urls_to_visit))  # Remove duplicate URLs

            # Stop crawling if the limit of pages to scrape is reached
            if self.limit is not None and len(visited_urls) >= self.limit:
                break

        return loaded_content  # Return the loaded content from all visited URLs
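
A quick sketch of how the reworked crawler might be exercised (the URL and limit below are placeholders, not part of this commit):

from application.parser.remote.crawler_loader import CrawlerLoader

# Crawl at most 5 pages, following only links on the same domain as the seed URL
crawler = CrawlerLoader(limit=5)
docs = crawler.load_data("https://example.com")
print(f"Loaded {len(docs)} documents")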
8 changes: 4 additions & 4 deletions application/parser/remote/remote_creator.py
@@ -1,13 +1,13 @@
from application.parser.remote.sitemap_loader import SitemapLoader
from application.parser.remote.crawler_loader import CrawlerLoader
from application.parser.remote.web_loader import WebLoader


class RemoteCreator:
    loaders = {
        'url': WebLoader,
        'sitemap': SitemapLoader,
        'crawler': CrawlerLoader
    }

    @classmethod
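
The factory classmethod itself is collapsed in this view, so the sketch below goes through the now-enabled loaders mapping directly; the source type and URL are placeholders:

from application.parser.remote.remote_creator import RemoteCreator

# 'sitemap' and 'crawler' are now available alongside 'url'
loader_cls = RemoteCreator.loaders['crawler']
docs = loader_cls(limit=3).load_data("https://example.com")
print(f"Loaded {len(docs)} documents")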
71 changes: 59 additions & 12 deletions application/parser/remote/sitemap_loader.py
@@ -1,27 +1,74 @@
import requests
import re # Import regular expression library
import xml.etree.ElementTree as ET
from application.parser.remote.base import BaseRemote

class SitemapLoader(BaseRemote):
    def __init__(self, limit=20):
        from langchain.document_loaders import WebBaseLoader
        self.loader = WebBaseLoader
        self.limit = limit  # Limit to control the number of URLs to process

    def load_data(self, sitemap_url):
        urls = self._extract_urls(sitemap_url)
        if not urls:
            print(f"No URLs found in the sitemap: {sitemap_url}")
            return []

        # Load content of extracted URLs
        documents = []
        processed_urls = 0  # Counter for processed URLs
        for url in urls:
            if self.limit is not None and processed_urls >= self.limit:
                break  # Stop processing if the limit is reached

            try:
                loader = self.loader([url])
                documents.extend(loader.load())
                processed_urls += 1  # Increment the counter after processing each URL
            except Exception as e:
                print(f"Error processing URL {url}: {e}")
                continue

        return documents

    def _extract_urls(self, sitemap_url):
        response = requests.get(sitemap_url)
        if response.status_code != 200:
            print(f"Failed to fetch sitemap: {sitemap_url}")
            return []

        # Determine if this is a sitemap or a plain URL
        if self._is_sitemap(response):
            # It's a sitemap, so parse it and extract URLs
            return self._parse_sitemap(response.content)
        else:
            # It's not a sitemap, return the URL itself
            return [sitemap_url]

    def _is_sitemap(self, response):
        content_type = response.headers.get('Content-Type', '')
        if 'xml' in content_type or response.url.endswith('.xml'):
            return True

        if '<sitemapindex' in response.text or '<urlset' in response.text:
            return True

        return False

    def _parse_sitemap(self, sitemap_content):
        # Remove the default namespace so the element paths below stay simple
        sitemap_content = re.sub(' xmlns="[^"]+"', '', sitemap_content.decode('utf-8'), count=1)

        root = ET.fromstring(sitemap_content)

        # Extract page URLs listed in the sitemap
        urls = []
        for loc in root.findall('.//url/loc'):
            urls.append(loc.text)

        # Check for nested sitemaps and follow them recursively
        for sitemap in root.findall('.//sitemap/loc'):
            nested_sitemap_url = sitemap.text
            urls.extend(self._extract_urls(nested_sitemap_url))

        return urls
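
A usage sketch under the same assumptions (the sitemap URL is a placeholder); nested sitemap indexes are followed recursively by _extract_urls, and a non-XML response falls back to loading the given URL as a single page:

from application.parser.remote.sitemap_loader import SitemapLoader

loader = SitemapLoader(limit=10)  # process at most 10 URLs from the sitemap
docs = loader.load_data("https://example.com/sitemap.xml")
print(f"Loaded {len(docs)} documents")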
14 changes: 12 additions & 2 deletions application/parser/remote/web_loader.py
@@ -6,5 +6,15 @@ def __init__(self):
        self.loader = WebBaseLoader

    def load_data(self, urls):
        if isinstance(urls, str):
            urls = [urls]  # Convert string to list if a single URL is passed

        documents = []
        for url in urls:
            try:
                loader = self.loader([url])  # Process URLs one by one
                documents.extend(loader.load())
            except Exception as e:
                print(f"Error processing URL {url}: {e}")
                continue  # Continue with the next URL if an error occurs
        return documents
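
A sketch of the new calling conventions (placeholder URLs); a single string is now accepted, and one failing URL no longer aborts the whole batch:

from application.parser.remote.web_loader import WebLoader

loader = WebLoader()
single = loader.load_data("https://example.com")  # plain string
batch = loader.load_data(["https://example.com/a", "https://example.com/b"])  # list of URLs
print(len(single), len(batch))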
