Getting Started with Python Hacking Tools for Beginners (Continued)

Author: 黑客网 | Published: 2022-05-26 14:23

A Brief Overview of Python Hacking Tools

Preface

Preparation

Introduction to requests

A brief introduction to bs4

Scraping Xici proxies with Python

Writing an information gatherer that scrapes 站长之家 (chinaz.com) with Python

Using various APIs from Python

Writing an MD5 cracker in Python

Calling the Shodan API from Python

Extra: Python DNS lookups and DNS zone-transfer vulnerability checks

Recommended books

Preface

These are small Python tool scripts; I hope they bring some good news to newcomers studying Python security.

Preparation

Environment: Windows. Python version: 3.7.

Libraries used: requests, bs4, optparse

Install requests: pip install requests

Install bs4: pip install bs4
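optparse ships with the Python standard library, so nothing needs to be installed for it. If you want to confirm the two installs worked, a one-line import check is enough (both packages expose a __version__ attribute):

python -c "import requests, bs4; print(requests.__version__, bs4.__version__)"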

Introduction to requests

Compared with urllib and similar modules, and even with full crawler frameworks, requests is far simpler to use.

requests documentation (Chinese):
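As a quick illustration of that simplicity, a single call fetches a page, follows redirects, and decodes the body. A minimal sketch; httpbin.org is just a stand-in test target:

import requests

# One call handles the connection, redirects, and decoding.
r = requests.get('http://httpbin.org/get', params={'q': 'demo'})
print(r.status_code)              # e.g. 200
print(r.headers['Content-Type'])  # response header lookup
print(r.text[:200])               # decoded body as str; r.content is the raw bytes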

Introduction to the bs4 module

bs4 (Beautiful Soup 4) is not a crawler framework itself but an HTML/XML parsing library that crawlers commonly use; we only need a handful of its functions here.

Documentation (Chinese):
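The two bs4 calls the scripts below lean on are find_all() and get_text(). A minimal sketch over an inline HTML snippet shows both:

from bs4 import BeautifulSoup

html = "<table><tr><td>1.2.3.4</td><td>8080</td></tr></table>"
soup = BeautifulSoup(html, 'html.parser')
for cell in soup.find_all('td'):  # every <td> element in the document
    print(cell.get_text())        # prints 1.2.3.4, then 8080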

Part 4: Scraping Xici proxies with Python (a "four-piece kit", one function per proxy category)

import re

import requests
from bs4 import BeautifulSoup

# The unused "import dauk" and "import time" from the original are dropped.

# Matches a dotted IPv4 address. findall returns the four octet capture
# groups as a tuple, so the octets are re-joined with "." below.
IP_RE = (r'(25[0-5]|2[0-4]\d|[0-1]\d{2}|[1-9]?\d)\.'
         r'(25[0-5]|2[0-4]\d|[0-1]\d{2}|[1-9]?\d)\.'
         r'(25[0-5]|2[0-4]\d|[0-1]\d{2}|[1-9]?\d)\.'
         r'(25[0-5]|2[0-4]\d|[0-1]\d{2}|[1-9]?\d)')

HEADER = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
                        'AppleWebKit/537.36 (KHTML, like Gecko) '
                        'Chrome/63.0.3239.132 Safari/537.36'}

def save_ips(ips):
    # Append every harvested IP to the output file, one per line.
    with open('采集到的IP.txt', 'a') as f:
        for ip in ips:
            print(ip)
            f.write(ip + '\n')

def daili():
    # Transparent proxies (/nt/): parse the <td> cells with BeautifulSoup.
    print('[+] Fast-crawling proxy IPs, 99 pages by default')
    for b in range(1, 99):
        url = "http://www.xicidaili.com/nt/{}".format(b)
        r = requests.get(url, headers=HEADER)
        soup = BeautifulSoup(r.content, 'html.parser')
        for cell in soup.find_all('td'):
            save_ips(".".join(g) for g in re.findall(IP_RE, cell.get_text()))

def dailigaoni():
    # High-anonymity proxies (/nn/): regex straight over the page text.
    print('[+] Fast-crawling proxy IPs, 99 pages by default')
    for i in range(1, 99):
        url = "http://www.xicidaili.com/nn/{}".format(i)
        r = requests.get(url, headers=HEADER)
        save_ips(".".join(g) for g in re.findall(IP_RE, r.text))

def dailihtp():
    # HTTP proxies (/wn/). The original passed the header dict as a quoted
    # string and ran a str pattern over bytes; both bugs are fixed here.
    print('[+] Fast-crawling proxy IPs, 99 pages by default')
    for x in range(1, 99):
        url = "http://www.xicidaili.com/wn/{}".format(x)
        r = requests.get(url, headers=HEADER)
        save_ips(".".join(g) for g in re.findall(IP_RE, r.text))

def dailihttps():
    # HTTPS proxies (/wt/). Same bytes-vs-str fix as above.
    print('[+] Fast-crawling proxy IPs, 99 pages by default')
    for s in range(1, 99):
        url = "http://www.xicidaili.com/wt/{}".format(s)
        r = requests.get(url, headers=HEADER)
        save_ips(".".join(g) for g in re.findall(IP_RE, r.text))

daili()
dailigaoni()
dailihtp()
dailihttps()

Sample run output:

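Once 采集到的IP.txt has entries, the natural next step is to test them through requests' proxies parameter. A minimal sketch, with the assumption (not true of the script above, which saves bare IPs) that each line holds ip:port:

import requests

# Hypothetical format: one "ip:port" entry per line.
with open('采集到的IP.txt') as f:
    for line in f:
        proxy = line.strip()
        if not proxy:
            continue
        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy}
        try:
            r = requests.get('http://httpbin.org/ip', proxies=proxies, timeout=5)
            print('[alive]', proxy, r.json())
        except requests.RequestException:
            print('[dead]', proxy)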

Part 5: Writing an information gatherer that scrapes 站长之家 (chinaz.com) with Python

import optparse
import re
import sys

import requests
from bs4 import BeautifulSoup

HEADER = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
                        'AppleWebKit/537.36 (KHTML, like Gecko) '
                        'Chrome/63.0.3239.132 Safari/537.36'}

def main():
    usage = "[-z Subdomain mining]" \
            "[-p Side of the station inquiries]" \
            "[-x HTTP status query]"
    parser = optparse.OptionParser(usage)
    parser.add_option('-z', dest="Subdomain", help="Subdomain mining")
    parser.add_option('-p', dest='Side', help='Side of the station inquiries')
    parser.add_option('-x', dest='http', help='HTTP status query')
    options, args = parser.parse_args()
    if options.Subdomain:
        Subdomain(options.Subdomain)
    elif options.Side:
        Side(options.Side)
    elif options.http:
        Http(options.http)
    else:
        parser.print_help()
        sys.exit()

def Subdomain(subdomain):
    # Subdomain mining via the chinaz.com mobile subdomain tool.
    print('-----------Subdomains quickly tap-----------')
    url = "http://m.tool.chinaz.com/subdomain/?domain={}".format(subdomain)
    r = requests.get(url, headers=HEADER).content
    g = re.finditer(r'<td>\D[a-zA-Z0-9][-a-zA-Z0-9]{0,62}\D(\.[a-zA-Z0-9]\D[-a-zA-Z0-9]{0,62})+\.?</td>', str(r))
    for x in g:
        # x.group() is the matched <td>...</td> fragment; the original fed the
        # match object's repr to BeautifulSoup, which only worked by accident.
        opg = BeautifulSoup(x.group(), 'html.parser')
        for link in opg.find_all('td'):
            print(link.get_text())

def Side(side):
    # "Side station" (shared-IP neighbours) lookup via the chinaz.com tool.
    print('--------Side of the station inquiries--------')
    url = "http://m.tool.chinaz.com/same/?s={}".format(side)
    g = requests.get(url, headers=HEADER).content.decode('utf-8')
    # The href prefix below appears to have been rewritten by a re-hosting
    # site; adjust the pattern to whatever the live page markup contains.
    ksd = re.finditer(r'<a href=http://www.52bug.cn/hkjs/.*?>[a-zA-Z0-9][-a-zA-Z0-9]{0,62}(\.[a-zA-Z0-9][-a-zA-Z0-9]{0,62})+\.?</a>', g)
    for l in ksd:
        pods = BeautifulSoup(l.group(), 'html.parser')
        for xsd in pods.find_all('a'):
            sde = re.findall(r'[a-zA-Z]+://[^\s]*', str(xsd))  # fixed the [a-zA-z] typo
            print("".join(sde))

def Http(http):
    # Fetch the target over HTTP and dump its response headers.
    print('--------Http status query--------')
    url = "http://{}".format(http)
    r = requests.get(url, headers=HEADER)
    for name, value in r.headers.items():
        print(name, ':', value)

if __name__ == '__main__':
    main()

Run screenshots:

-h  help

-z  subdomain mining

-p  side-station (shared-IP) query
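Assuming the script is saved as info_gather.py (a hypothetical filename), typical runs look like:

python info_gather.py -z example.com   # subdomain mining
python info_gather.py -p example.com   # side-station (shared-IP) query
python info_gather.py -x example.com   # dump HTTP response headers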

