# nirbik.py
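
# Scrape paginated question listings from nirbik.com, fetch each linked
# question page, and save both the raw HTML (./Raw) and a minimal
# <article> extraction (./Data), keyed by the numeric question id.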
import os
import time

import requests
from bs4 import BeautifulSoup

# Bengali numerals mapped to their ASCII equivalents; any other character
# is dropped.
BN_TO_EN_DIGITS = {
    '০': '0', '১': '1', '২': '2', '৩': '3', '৪': '4',
    '৫': '5', '৬': '6', '৭': '7', '৮': '8', '৯': '9',
}


def date_translator(bn_number):
    """Convert a string of Bengali digits to the equivalent ASCII digits."""
    return "".join(BN_TO_EN_DIGITS.get(letter, "") for letter in bn_number)
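
# Quick sanity check for the digit translator; the input value here is only
# illustrative (the script itself never calls this helper).
assert date_translator("১৯৭১") == "1971"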

newspaper_base_url = 'https://www.nirbik.com/'

# Listing pages are paginated 35 questions at a time; this run walks
# pages 431 through 571 of the archive.
for index in range(431, 572):
    url = newspaper_base_url + "questions?start=" + str(index * 35)
    try:
        print(url)
        # A timeout keeps one dead listing page from hanging the whole crawl.
        archive_response = requests.get(url, timeout=10)
    except requests.RequestException:
        print("No response for links in archive, passing")
        continue
    soup = BeautifulSoup(archive_response.content, "html.parser")
    all_links = soup.find_all("a")
    if len(all_links) == 0:
        # An empty page means the end of the archive has been reached.
        break
    for link in all_links:
        href = link.get('href')
        try:
            # <a> tags without an href attribute yield None, which has no split().
            link_tokens = href.split("/")
        except AttributeError:
            continue
        # Question links look like "/<numeric id>/<slug>"; keep only those.
        if len(link_tokens) == 3 and link_tokens[1].isnumeric():
            article_url = newspaper_base_url + link_tokens[1]
        else:
            continue
        try:
            print(article_url)
            article_data = requests.get(article_url, timeout=10).text
        except requests.RequestException:
            print("No response for content in link, pausing before the next one")
            time.sleep(2)
            continue

        article_soup = BeautifulSoup(article_data, "html.parser")

        # The question title lives in a <meta itemprop="name"> tag; fall back
        # to an empty title if the tag is missing.
        try:
            title = article_soup.find("meta", {"itemprop": "name"}).get('content')
        except AttributeError:
            title = ""

        # The question body and its answers sit in <div itemprop="text"> blocks.
        article_content = ""
        for block in article_soup.find_all("div", {"itemprop": "text"}):
            article_content += block.get_text()

        data = "<article>\n"
        data += "<title>" + title + "</title>\n"
        data += "<text>\n" + article_content + "\n</text>\n"
        data += "</article>"

        output_file_name = link_tokens[1]
        output_dir = './Data'
        raw_output_dir = './Raw'
        os.makedirs(output_dir, exist_ok=True)
        os.makedirs(raw_output_dir, exist_ok=True)

        # Save the full page markup for later re-parsing...
        try:
            with open(raw_output_dir + '/' + output_file_name, 'w', encoding='utf8') as file:
                file.write(str(article_soup))
        except OSError:
            pass
        # ...and the extracted <article> document next to it.
        try:
            with open(output_dir + '/' + output_file_name, 'w', encoding='utf8') as file:
                file.write(data)
        except OSError:
            pass