diff --git a/lib/core.py b/lib/core.py
index c2d1169..f290946 100644
--- a/lib/core.py
+++ b/lib/core.py
@@ -154,16 +154,15 @@ def get_method(self):
             Log.info("URL is not an HTTP url, ignoring")

     @classmethod
-    def main(self,url,proxy,headers,payload,cookie,method=2):
-
+    def main(self,url,proxy,headers,payload,cookie,ssl,method=2):
+
         print(W+"*"*15)
         self.payload=payload
         self.url=url

-        self.session=session(proxy,headers,cookie)
-        Log.info("Checking connection to: "+Y+url)
+        Log.info("Checking connection to: "+Y+url)
         try:
-            ctr=self.session.get(url)
+            ctr=self.session.get(url,verify=ssl)
             self.body=ctr.text
         except Exception as e:
             Log.high("Internal error: "+str(e))
diff --git a/lib/crawler/crawler.py b/lib/crawler/crawler.py
index ccb5339..f61d191 100644
--- a/lib/crawler/crawler.py
+++ b/lib/crawler/crawler.py
@@ -1,4 +1,5 @@
 import requests
+import requests
 from lib.helper.Log import *
 from lib.helper.helper import *
 from lib.core import *
@@ -7,47 +8,47 @@ from multiprocessing import Process

 class crawler:
-
+
     visited=[]
-
+
     @classmethod
-    def getLinks(self,base,proxy,headers,cookie):
+    def getLinks(self,base,proxy,headers,cookie,ssl):
         lst=[]
-
+
         conn=session(proxy,headers,cookie)
-        text=conn.get(base).text
+        text=conn.get(base, verify=ssl).text
         isi=BeautifulSoup(text,"html.parser")
-
-
+
+
         for obj in isi.find_all("a",href=True):
             url=obj["href"]
-
-
+
+
             if urljoin(base,url) in self.visited:
                 continue

             elif url.startswith("mailto:") or url.startswith("javascript:"):
                 continue
-            # :// will check if there any subdomain or any other domain but it will pass directory
+            # :// will check if there any subdomain or any other domain but it will pass directory
             elif url.startswith(base) or "://" not in url :
                 lst.append(urljoin(base,url))
                 self.visited.append(urljoin(base,url))
-
+
         return lst

     @classmethod
-    def crawl(self,base,depth,proxy,headers,level,method,cookie):
+    def crawl(self,base,depth,proxy,headers,level,method,cookie,ssl):
+
+        urls=self.getLinks(base,proxy,headers,cookie,ssl)

-        urls=self.getLinks(base,proxy,headers,cookie)
-
         for url in urls:
             if url.startswith("https://") or url.startswith("http://"):
                 p=Process(target=core.main, args=(url,proxy,headers,level,cookie,method))
                 p.start()
                 p.join()

                 if depth != 0:
-                    self.crawl(url,depth-1,base,proxy,level,method,cookie)
+                    self.crawl(url,depth-1,base,proxy,level,method,cookie,ssl)
                 else:
                     break
diff --git a/pwnxss.py b/pwnxss.py
index 8c39bff..ba06767 100644
--- a/pwnxss.py
+++ b/pwnxss.py
@@ -44,17 +44,21 @@ def start():
     pos_opt.add_argument("--proxy",default=None,metavar="",help="Set proxy (e.g. {'https':'https://10.10.1.10:1080'})")
     pos_opt.add_argument("--about",action="store_true",help="Print information about PwnXSS tool")
     pos_opt.add_argument("--cookie",help="Set cookie (e.g {'ID':'1094200543'})",default='''{"ID":"1094200543"}''',metavar="")
-
+    pos_opt.add_argument("--ssl",help="Put False to disable the ssl verification",default=True,metavar="")
+
     getopt=parse.parse_args()
+    if getopt.ssl == "false" or getopt.ssl == "False":
+        getopt.ssl = False
+
     print(logo)
     Log.info("Starting PwnXSS...")
     if getopt.u:
-        core.main(getopt.u,getopt.proxy,getopt.user_agent,check(getopt),getopt.cookie,getopt.method)
-
-        crawler.crawl(getopt.u,int(getopt.depth),getopt.proxy,getopt.user_agent,check(getopt),getopt.method,getopt.cookie)
-
+        core.main(getopt.u,getopt.proxy,getopt.user_agent,check(getopt),getopt.cookie,getopt.ssl,getopt.method)
+
+        crawler.crawl(getopt.u,int(getopt.depth),getopt.proxy,getopt.user_agent,check(getopt),getopt.method,getopt.cookie, getopt.ssl)
+
     elif getopt.single:
-        core.main(getopt.single,getopt.proxy,getopt.user_agent,check(getopt),getopt.cookie,getopt.method)
+        core.main(getopt.single,getopt.proxy,getopt.user_agent,check(getopt),getopt.cookie,getopt.ssl,getopt.method)
     elif getopt.about:
         print("""
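Note on the new --ssl option: argparse hands the value over as a string when the flag is supplied on the command line, so the patch maps "false"/"False" to the boolean False before it reaches requests as the verify= argument. The following is a minimal standalone sketch of that flow; the fetch() helper and the urllib3 warning suppression are illustrative assumptions for this example, not code from the patch itself.

# Standalone sketch of the --ssl flow in this diff (illustrative only;
# fetch() and the warning suppression are not part of the PwnXSS patch).
import argparse
import requests
import urllib3

parse = argparse.ArgumentParser()
parse.add_argument("--ssl", help="Put False to disable the ssl verification", default=True, metavar="")
getopt = parse.parse_args()

# Values given on the command line arrive as strings, so "false"/"False"
# must be mapped to the boolean False by hand, exactly as the patch does.
if getopt.ssl == "false" or getopt.ssl == "False":
    getopt.ssl = False

def fetch(url, ssl):
    # verify=False disables certificate checking; silence the resulting
    # InsecureRequestWarning so the scanner output stays readable.
    if ssl is False:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    return requests.get(url, verify=ssl)

print(fetch("https://example.com", getopt.ssl).status_code)

With default=True, running the tool without --ssl keeps certificate verification on, which matches the previous behaviour. Any value other than "false"/"False" is passed through unchanged, and requests interprets a string verify value as a path to a CA bundle.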