collect_url_documents.py
import requests
from bs4 import BeautifulSoup
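# Scrape the Hellenic Parliament plenary-sitting search results, page by page,
# collecting each sitting's document URL and writing the list to a text file.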
# List that will hold the collected document URLs
list_of_doc_urls = []
# For every results page of the chosen parliamentary session
for page in range(1, 21):
    # Request the current results page
    # C session -> range(1, 19)
    # get_page = requests.get('https://www.hellenicparliament.gr/Praktika/Synedriaseis-Olomeleias?search=on'
    #                         '&SessionPeriod=d1e63fbc-9e29-4a80-9986-adb70123628e&pageNo=' + str(page))
    # B session -> range(1, 21)
    get_page = requests.get('https://www.hellenicparliament.gr/Praktika/Synedriaseis-Olomeleias?search=on'
                            '&SessionPeriod=5cf4f254-eee1-4264-9258-ac4b00b1ca69&pageNo=' + str(page))
    # A session -> range(1, 23)
    # get_page = requests.get('https://www.hellenicparliament.gr/Praktika/Synedriaseis-Olomeleias?search=on'
    #                         '&SessionPeriod=1d81f25b-0dfd-4649-8dab-aa8d00a81852&pageNo=' + str(page))
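    # Note: exactly one of the three session blocks above should be active,
    # and the loop's page range should match the hint for that session.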
    # Parse the fetched page once
    get_page_soup = BeautifulSoup(get_page.text, 'html.parser')
    # For each of the (up to) ten HTML elements that contain links on the current page
    for i in range(1, 11):
        # Element ids carry a two-digit, zero-padded counter (01-10)
        number = str(i).zfill(2)
        # Take the anchor elements whose id matches the current counter
        all_id_elements = get_page_soup.find_all('a', {'id': 'ctl00_ContentPlaceHolder1_rr_repSearchResults_ctl'
                                                       + number + '_lnkTxt'})
        # Collect the href of every matching element
        for el in all_id_elements:
            if el.get('href'):
                list_of_doc_urls.append(el.get('href'))
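# Note: the 'links' directory must already exist, and append mode means
# re-running the script adds duplicate lines to the file.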
# Save the collected links to a text file, one per line
with open('links/b_doc_links.txt', 'a') as f:
    for elem in list_of_doc_urls:
        f.write(str(elem) + '\n')