scraper.py
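# Google Images scraper: searches for a term, clicks each thumbnail to reveal the
# full-size image URL, and saves every download into a per-term folder.
# Note: the find_elements_by_css_selector calls and the "img.rg_ic" / "img.irc_mi" /
# ".ksb" selectors assume a Selenium 3.x client and the Google Images page layout
# this script was written against; both may need updating for newer versions.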
from selenium import webdriver
import os
import io
import time
import hashlib
import requests
from PIL import Image
from termcolor import colored

DRIVER_PATH = r'C:\Users\KIIT\Desktop\Python\Selenium\chromedriver.exe'
def fetch_image_urls(query: str, max_links_to_fetch: int, wd: webdriver.Chrome, sleep_between_interactions: float = 1):
    def scroll_to_end(wd):
        wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(sleep_between_interactions)

    # build the google query
    search_url = "https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&q={q}&oq={q}&gs_l=img"

    # load the page
    wd.get(search_url.format(q=query))

    image_urls = set()
    image_count = 0
    results_start = 0
    while image_count < max_links_to_fetch:
        scroll_to_end(wd)

        # get all image thumbnail results
        thumbnail_results = wd.find_elements_by_css_selector("img.rg_ic")
        number_results = len(thumbnail_results)
        print(f"Found: {number_results} search results. Extracting links from {results_start}:{number_results}")

        for img in thumbnail_results[results_start:number_results]:
            # try to click every thumbnail so we can get the full-size image behind it
            try:
                img.click()
                time.sleep(sleep_between_interactions)
            except Exception:
                continue

            # extract image urls from the preview pane
            actual_images = wd.find_elements_by_css_selector('img.irc_mi')
            for actual_image in actual_images:
                if actual_image.get_attribute('src'):
                    image_urls.add(actual_image.get_attribute('src'))

            image_count = len(image_urls)
            if image_count >= max_links_to_fetch:
                print(f"Found: {image_count} image links, done!")
                break
        else:
            print("Found:", len(image_urls), "image links, looking for more ...")
            time.sleep(1)
            # click the "load more results" button if it is present
            load_more_buttons = wd.find_elements_by_css_selector(".ksb")
            if load_more_buttons:
                wd.execute_script("document.querySelector('.ksb').click();")

        # move the result start point further down
        results_start = len(thumbnail_results)

    return image_urls
def persist_image(folder_path: str, url: str):
    try:
        image_content = requests.get(url).content
    except Exception as e:
        print(colored('ERROR', 'red'), f"- Could not download {url} - {e}")
        return

    try:
        image_file = io.BytesIO(image_content)
        image = Image.open(image_file).convert('RGB')
        # name the file after a short hash of its content to avoid duplicates
        file_path = os.path.join(folder_path, hashlib.sha1(image_content).hexdigest()[:10] + '.jpg')
        with open(file_path, 'wb') as f:
            image.save(f, "JPEG", quality=85)
        print(colored('SUCCESS', 'green'), f"- saved {url} - as {file_path}")
    except Exception as e:
        print(colored('ERROR', 'red'), f"- Could not save {url} - {e}")
def search_and_download(search_term: str, driver_path: str,
                        target_path=r'C:\Users\KIIT\Desktop\Python\Selenium\Scraped images',
                        number_images=500):
    target_folder = os.path.join(target_path, '_'.join(search_term.lower().split(' ')))
    if not os.path.exists(target_folder):
        os.makedirs(target_folder)

    # the context manager closes the browser automatically, so no explicit wd.quit() is needed
    with webdriver.Chrome(executable_path=driver_path) as wd:
        res = fetch_image_urls(search_term, number_images, wd=wd, sleep_between_interactions=0.5)

    for c, elem in enumerate(res, start=1):
        print(f"Image {c}:")
        persist_image(target_folder, elem)


search_term = 'Amit_Shah'
search_and_download(search_term=search_term, driver_path=DRIVER_PATH)