id_filler.py
from os import walk

import requests
from bs4 import BeautifulSoup

# Top-level directories that do not correspond to vulnerable npm packages.
SKIP_DIRS = {'node_modules', 'htmls', 'config', 'statics'}

# Every other sub-directory is named "<package>_<version>" and contains a
# package.json whose empty "id" field should be filled with the CVE id of
# that package's Directory Traversal advisory on Snyk.
dirnames = next(walk('.'))[1]

for dirs in dirnames:
    if dirs in SKIP_DIRS:
        continue

    file_read = './' + dirs + '/package.json'
    dir_val = dirs.rsplit('_', 1)[0]  # package name without the version suffix

    with open(file_read) as f1:
        lines = f1.readlines()

    with open(file_read, 'w') as f2:
        for line in lines:
            if '"id": ""' not in line:
                f2.write(line)
                continue

            # Search Snyk's vulnerability database for this package.
            URL = 'https://security.snyk.io/search?type=npm&q=' + str(dir_val)
            page = requests.get(URL)
            soup = BeautifulSoup(page.content, 'html.parser')
            to_parse = str(soup)

            if "No results found" in to_parse:
                f2.write(line)
                print("not found:", dir_val)
                continue

            string_list = to_parse.splitlines()
            flag = 0
            for j in range(len(string_list)):
                if 'Directory Traversal'.lower() not in string_list[j].lower():
                    continue

                # The line above the match holds the advisory link; extract its href.
                val1 = str(string_list[j - 1])
                source1 = val1.split('href="')
                source2 = source1[1].replace('">', '')

                # Fetch the advisory page and pull the CVE id out of its
                # cve.mitre.org reference link.
                URL1 = 'https://security.snyk.io/' + source2
                page1 = requests.get(URL1)
                soup1 = BeautifulSoup(page1.content, 'html.parser')
                to_parse1 = str(soup1)
                string_list1 = to_parse1.splitlines()

                cve_id = ''
                for k in range(len(string_list1)):
                    if 'https://cve.mitre.org/cgi-bin/cvename.cgi?name='.lower() in string_list1[k].lower():
                        val3 = string_list1[k].split('https://cve.mitre.org/cgi-bin/cvename.cgi?name=')[1]
                        cve_id = val3.split('"')[0]
                        print(dirs, 'yes')
                        break

                # Fill the empty "id" field with the CVE id that was found.
                line1 = line.replace('""', '"' + cve_id + '"')
                f2.write(line1)
                flag = 1
                break

            if flag == 0:
                # No Directory Traversal advisory found; keep the line unchanged.
                f2.write(line)
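
# For reference, the rewrite above amounts to the following substitution on a
# package.json line (a minimal sketch; the field layout and the CVE value are
# illustrative assumptions, the real id is scraped from the Snyk advisory):
#
#     before:   "id": ""
#     after:    "id": "CVE-0000-0000"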