Information Gathering: Passive Information Gathering


Passive information gathering: collecting information about a target host indirectly, typically through search engines or social media, without ever interacting with the target host itself.

DNS Resolution

1.1 IP Lookup

Look up the IP address that a domain resolves to:

import socket
import argparse

def domain_parse():
    # read the target domain from the command line
    usage = 'usage: python %(prog)s domain'
    parser = argparse.ArgumentParser(usage=usage)
    parser.add_argument('domain')
    options = parser.parse_args()
    domain = options.domain
    return domain

if __name__ == '__main__':
    domain = domain_parse()
    ip = socket.gethostbyname(domain)  # forward DNS resolution
    print(ip)
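
A domain frequently resolves to more than one address, and socket.gethostbyname returns only the first. A minimal sketch using socket.gethostbyname_ex to list them all (baidu.com is just an example target):

import socket

# gethostbyname_ex returns (canonical_name, alias_list, ip_list)
name, aliases, ips = socket.gethostbyname_ex('baidu.com')
print(name)
print(aliases)
print(ips)  # every address returned for the name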

1.2 Whois Lookup

Use whois to look up a domain's registration information.

Install python-whois first: pip install python-whois

from whois import whois
import argparse

def domain_parse():
    # read the target domain from the command line
    usage = 'usage: python %(prog)s domain'
    parser = argparse.ArgumentParser(usage=usage)
    parser.add_argument('domain')
    options = parser.parse_args()
    domain = options.domain
    return domain

if __name__ == '__main__':
    domain = domain_parse()
    data = whois(domain)  # query the registration record
    print(data)
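
The object returned by whois() exposes the parsed registration fields as attributes. A small sketch of reading the usual ones; which fields are populated varies by registry, so treat the names below as typical rather than guaranteed:

from whois import whois

data = whois('baidu.com')      # example domain
print(data.registrar)          # sponsoring registrar
print(data.creation_date)      # registration date
print(data.expiration_date)    # expiry date
print(data.name_servers)       # authoritative name servers
print(data.emails)             # registrant/abuse contact emails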

1.3 Subdomain Enumeration

Third-party tools

OneForAll, Sublist3r, YunSee (云悉), Layer Subdomain Miner (Layer子域名挖掘机), Yujian subdomain scanner (御剑子域名扫描), dnsenum. Sublist3r can also be driven directly from Python, as sketched below.
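
A minimal Sublist3r sketch based on the entry-point signature shown in its README (assumes the sublist3r package is installed, and example.com is a placeholder):

import sublist3r

# positional arguments: domain, threads, savefile, ports,
# silent, verbose, enable_bruteforce, engines
subdomains = sublist3r.main('example.com', 40, None, None,
                            silent=True, verbose=False,
                            enable_bruteforce=False, engines=None)
for sub in subdomains:
    print(sub)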

Enumerating subdomains in Python via Bing:

import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import sys

def bing_search(site, pages):
    Subdomain = []  # collected subdomains
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Referer': 'https://cn.bing.com/',
        'cookie': 'MUID=2D75DFAC11BE6E2C17A5CE1110BF6F5D; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=6E08C7F335954E4A81F836E094D9EE88&dmnchg=1; _UR=cdxcls=0&QS=0&TQS=0; SRCHUSR=DOB=20230208&T=1701833557000; _Rwho=u=d; _SS=SID=0E3DBDDEA58566841768AE01A43A679F&R=0&RB=0&GB=0&RG=200&RP=0; ipv6=hit=1701837160490&t=6;'
    }
    for i in range(1, int(pages) + 1):
        # page through Bing results for site:<target>
        url = 'https://cn.bing.com/search?q=site%3a' + site + '&go=search&qs=ds&first=' + str((i - 1) * 10) + '&FORM=PERE'
        html = requests.get(url, stream=True, headers=headers)
        soup = BeautifulSoup(html.content, 'html.parser')
        job_bt = soup.findAll('h2')  # each result title lives in an <h2>
        for h2 in job_bt:
            link = h2.find('a')
            if link:
                href = link['href']
                domain = str(urlparse(href).scheme + '://' + urlparse(href).netloc)
                if domain in Subdomain:
                    pass
                else:
                    Subdomain.append(domain)
                    print(domain)
    return Subdomain

if __name__ == '__main__':
    if len(sys.argv) == 3:
        site = sys.argv[1]
        page = sys.argv[2]
    else:
        print('usage: %s baidu.com 10' % sys.argv[0])
        sys.exit(-1)
    Subdomain = bing_search(site, page)
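
Run it with the target domain and the number of result pages to walk, e.g. python bing_subdomain.py baidu.com 10, where the filename is whatever you saved the script as.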

1.4 Directory Scanning

dirsearch is the usual tool here; a stripped-down version of its core loop is sketched below.
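
What dirsearch automates is essentially probing a wordlist of candidate paths over HTTP. A minimal sketch of that loop; the target URL and the inline wordlist are placeholders, and a real scan needs a proper wordlist, threading, and smarter status filtering:

import requests

def dir_scan(base_url, wordlist):
    # probe each candidate path and report anything that is not a 404
    found = []
    for path in wordlist:
        url = base_url.rstrip('/') + '/' + path
        try:
            r = requests.get(url, timeout=5, allow_redirects=False)
        except requests.RequestException:
            continue
        if r.status_code != 404:
            found.append((url, r.status_code))
            print(r.status_code, url)
    return found

if __name__ == '__main__':
    # example target and tiny demo wordlist
    dir_scan('http://example.com', ['admin', 'login', 'robots.txt', 'backup.zip'])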

1.5 C-Segment Scanning

nbtscan, netdiscover
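
nbtscan and netdiscover find live hosts over NetBIOS and ARP respectively; the same /24 sweep can be approximated in plain Python with TCP connect probes. A minimal sketch (the 192.168.1 prefix and port 80 are only examples, and ARP-based tools will see hosts this misses):

import socket
from concurrent.futures import ThreadPoolExecutor

def alive(ip, port=80, timeout=1):
    # a successful TCP connect means the host is up and the port is open
    try:
        with socket.create_connection((ip, port), timeout=timeout):
            return ip
    except OSError:
        return None

def c_segment_scan(prefix):
    # sweep prefix.1 through prefix.254
    ips = ['%s.%d' % (prefix, i) for i in range(1, 255)]
    with ThreadPoolExecutor(max_workers=50) as pool:
        for ip in pool.map(alive, ips):
            if ip:
                print(ip, 'is up')

if __name__ == '__main__':
    c_segment_scan('192.168.1')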

1.6 Email Harvesting

The script below harvests email addresses for a target domain from Bing and Baidu search results:

# -*- coding: utf-8 -*-
import sys
import getopt
import re
import requests
import threading
from bs4 import BeautifulSoup

# banner
def banner():
    print('\033[1;34m' + '#' * 88 + '\033[0m\n'
          '\033[1;34m' + '#' * 37 + '\033[1;32m Email Crawler \033[1;34m' + '#' * 37 + '\033[0m\n'
          '\033[1;34m' + '#' * 88 + '\033[0m\n')

# usage
def usage():
    print('-h: --help   this help text;')
    print('-u: --url    target domain;')
    print('-p: --pages  number of result pages to crawl;')
    print('eg: python -u "www.baidu.com" -p 100' + '\n')
    sys.exit()

# main entry: parse the command-line arguments
def start(argv):
    url = ''
    pages = ''
    if len(sys.argv) < 2:
        print('-h for help\n')
        sys.exit()
    try:
        banner()
        opts, args = getopt.getopt(argv, 'u:p:h')
    except getopt.GetoptError:
        print('Error an argument!')
        sys.exit()
    for opt, arg in opts:
        if opt == '-u':
            url = arg
        elif opt == '-p':
            pages = arg
        elif opt == '-h':
            usage()
    launcher(url, pages)

class MyThread(threading.Thread):
    def __init__(self, func, args=()):
        super(MyThread, self).__init__()
        self.func = func
        self.args = args

    def run(self):
        # store the return value so the caller can fetch it via get_result
        self.result = self.func(*self.args)

    def get_result(self):
        try:
            return self.result
        except Exception:
            return None

def launcher(url, pages):
    # crawl one search-results page at a time
    if len(pages) < 1:
        pass
    else:
        for page in range(1, int(pages) + 1):
            keyword(url, page)

def keyword(url, page):
    threads = []
    email_sum = []
    email_num = []
    key_words = ['email', 'mail', 'mailbox', '邮件', '邮箱', 'postbox']
    for key_word in key_words:
        # one worker thread per search keyword
        t = MyThread(emails, args=(url, page, key_word))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()  # wait for the workers before collecting their results
        email_num.append(t.get_result())
    for email in email_num:
        if not email:
            continue
        for addr in email:
            if addr in email_sum:
                pass
            else:
                email_sum.append(addr)
                print(addr)

def emails(url, page, key_word):
    bing_emails = bing_search(url, page, key_word)
    baidu_emails = baidu_search(url, page, key_word)
    sum_emails = bing_emails + baidu_emails
    return sum_emails

def bing_search(url, page, key_word):
    referer = 'https://cn.bing.com/search?q=email+site%3abaidu.com&qs=n&sp=-1&pq=emailsite%3abaidu.com&first=1&FORM=PERE1'
    conn = requests.session()
    bing_url = 'https://cn.bing.com/search?q=' + key_word + '+site%3a' + url + '&qs=n&sp=-1&pq=' + key_word + 'site%3a' + url + '&first=' + str((page - 1) * 10) + '&FORM=PERE1'
    conn.get('https://cn.bing.com', headers=headers(referer))  # pick up session cookies first
    r = conn.get(bing_url, stream=True, headers=headers(referer), timeout=8)
    emails = search_email(r.text)
    return emails

def baidu_search(url, page, key_word):
    email_list = []
    emails = []
    referer = 'https://www.baidu.com/s?wd=email+site%3Abaidu.com&pn=1'
    baidu_url = 'https://www.baidu.com/s?wd=' + key_word + '+site%3A' + url + '&pn=' + str((page - 1) * 10)
    conn = requests.session()
    conn.get(referer, headers=headers(referer))
    r = conn.get(baidu_url, headers=headers(referer))
    soup = BeautifulSoup(r.text, 'lxml')
    tagh3 = soup.find_all('h3')
    for h3 in tagh3:
        link = h3.find('a')
        if not link:
            continue
        href = link.get('href')
        try:
            # Baidu result links are redirects, so fetch each result page
            r = conn.get(href, headers=headers(referer), timeout=8)
            emails = search_email(r.text)
        except Exception:
            pass
        for email in emails:
            email_list.append(email)
    return email_list

def search_email(html):
    # loose pattern for anything that looks like an email address
    emails = re.findall(r'[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+', html, re.I)
    return emails

def headers(referer):
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
        'Accept': '*/*',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip,deflate',
        'Referer': referer
    }
    return headers

if __name__ == '__main__':
    try:
        start(sys.argv[1:])
    except KeyboardInterrupt:
        print('interrupted by user, killing all threads')
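
Run it as, for example, python email_crawler.py -u baidu.com -p 10 (the filename is whatever you saved the script as). Each page of Bing and Baidu results is searched for the keywords above, and addresses found on each page are de-duplicated before printing.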
