python

超轻量级php框架startmvc

python采集百度搜索结果带有特定URL的链接代码实例

更新时间:2020-07-28 16:42:02 作者:startmvc
这篇文章主要介绍了python采集百度搜索结果带有特定URL的链接代码实例，文中通过示例代码介绍的非常详细。

这篇文章主要介绍了python采集百度搜索结果带有特定URL的链接代码实例,文中通过示例代码介绍的非常详细,对大家的学习或者工作具有一定的参考学习价值,需要的朋友可以参考下


# -*- coding: utf-8 -*-
import re
import threading
from argparse import ArgumentParser

# Queue was renamed to queue in Python 3; fall back to the old name so the
# script keeps working under Python 2 as well.
try:
    from queue import Queue
except ImportError:
    from Queue import Queue

import requests
from bs4 import BeautifulSoup as bs

# Command-line interface: a positional search keyword plus paging / threading /
# output options. Parsed once at import time into the module-global `result`,
# which the worker class and main() both read.
arg = ArgumentParser(description='baidu_url_collect py-script by xiaoye')
arg.add_argument('keyword',
                 help='keyword like "inurl:?id=" for searching sqli site')
# default=10 so omitting -p no longer leaves pagecount as None (which made
# main() crash with a TypeError).
arg.add_argument('-p', '--page', help='page count',
                 dest='pagecount', type=int, default=10)
arg.add_argument('-t', '--thread', help='the thread_count',
                 dest='thread_count', type=int, default=10)
# Bug fixes: dest must be 'outfile' (the scraper reads result.outfile), and the
# value is a file name, so it must be a string — the original declared type=int
# with a str default, which raised as soon as -o was supplied.
arg.add_argument('-o', '--outfile', help='the file save result',
                 dest='outfile', type=str, default='result.txt')
result = arg.parse_args()
# A plain desktop Firefox User-Agent so Baidu serves the normal HTML page.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:50.0) '
                         'Gecko/20100101 Firefox/50.0'}

class Bd_url(threading.Thread):
    """Worker thread: pulls Baidu search-result page URLs off a shared queue
    and records the real destination URL behind every result link.

    Renamed from the original misspelled ``Bg_url`` so that ``main()`` (which
    instantiates ``Bd_url``) actually finds the class; an alias is kept below
    for backward compatibility.
    """

    def __init__(self, que):
        threading.Thread.__init__(self)
        # Shared Queue of Baidu result-page URLs, fully populated before the
        # workers start.
        self._que = que

    def run(self):
        # Drain the queue. The empty()/get() race is acceptable here because
        # nothing enqueues after the threads are started.
        while not self._que.empty():
            url = self._que.get()
            try:
                # Fixed call target: the original invoked self.bd_url_collet
                # (typo), which does not exist -> AttributeError on every URL.
                self.bd_url_collect(url)
            except Exception as e:  # best-effort scraping: log and continue
                print(e)

    def bd_url_collect(self, url):
        """Fetch one Baidu result page and append each real link to the
        output file named by ``result.outfile``."""
        r = requests.get(url, headers=headers, timeout=3)
        soup = bs(r.content, 'lxml', from_encoding='utf-8')
        # Baidu result links are <a> tags that carry a data-click attribute
        # and no class. (The original wrapped 'data-click' in curly "smart
        # quotes", which is a SyntaxError.)
        bqs = soup.find_all(name='a',
                            attrs={'data-click': re.compile(r'.'), 'class': None})
        for bq in bqs:
            # Follow Baidu's redirect to obtain the real destination URL.
            r = requests.get(bq['href'], headers=headers, timeout=3)
            if r.status_code == 200:
                print(r.url)
                with open(result.outfile, 'a') as f:
                    f.write(r.url + '\n')


# Backward-compatible alias for the original (misspelled) class name.
Bg_url = Bd_url
def main():
    """Build the queue of Baidu result-page URLs and fan out worker threads."""
    threads = []
    thread_count = result.thread_count
    que = Queue()
    # One entry per result page; Baidu's `pn` query parameter advances in
    # steps of 10. The original used (pagecount - 1) * 10, which fetched one
    # page fewer than requested.
    for pn in range(0, result.pagecount * 10, 10):
        que.put('https://www.baidu.com/s?wd=' + result.keyword + '&pn=' + str(pn))
    # The original read "or i in range(...)" — a SyntaxError (missing 'f').
    for _ in range(thread_count):
        threads.append(Bd_url(que))
    for t in threads:
        t.start()
    for t in threads:
        t.join()


if __name__ == '__main__':
    main()
#执行格式
python aaaaa.py "inurl:asp?id=" -p 30 -t 30

以上就是本文的全部内容,希望对大家的学习有所帮助,也希望大家多多支持脚本之家。

python 采集 百度搜索结果 特定url