Python crawler: multi-threaded scraping of Tencent job postings (simple version)


Fetch Tencent recruitment data quickly with multiple threads.

import time

from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
import threading

def save_to_html(html_str, filename):
    dirname = os.path.dirname(filename)
    # makedirs with exist_ok=True is safe when several threads race to create the folder
    os.makedirs(dirname, exist_ok=True)
    with open(filename, 'w', encoding='utf-8') as fp:
        fp.write(html_str)
    print('Download finished!', filename)

def get_content_by_selenium(url):
    # NOTE: PhantomJS is deprecated; see the headless-Chrome sketch below.
    driver = webdriver.PhantomJS()
    wait = WebDriverWait(driver, 20)
    # request the page
    driver.get(url)
    # wait until the job-list container has been rendered
    wait.until(EC.presence_of_all_elements_located((By.XPATH, '//div[@class="recruit-list"]')))
    # grab the rendered page source
    html_str = driver.page_source
    driver.quit()  # close the browser so each thread does not leak a PhantomJS process
    return html_str
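
# PhantomJS support has been removed from Selenium 4.x, so the function above
# may fail on a current install. A hedged alternative (not part of the original
# post) that does the same thing with headless Chrome; it assumes chromedriver
# is available on the PATH:
def get_content_by_headless_chrome(url):
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')  # run Chrome without a visible window
    driver = webdriver.Chrome(options=options)
    wait = WebDriverWait(driver, 20)
    driver.get(url)
    wait.until(EC.presence_of_all_elements_located((By.XPATH, '//div[@class="recruit-list"]')))
    html_str = driver.page_source
    driver.quit()
    return html_str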

def download(i):
    base_url = 'https://careers.tencent.com/search.html?index=%s'
    html_str = get_content_by_selenium(base_url % i)
    save_to_html(html_str, './tencent/{}.html'.format(i))

if __name__ == '__main__':
    start = time.time()
    # Start the crawl by creating threading.Thread objects directly
    crawl_list = []
    for i in range(1, 20):
        # download(i)  # single-threaded version, kept for comparison
        # Creating one thread per task has real drawbacks:
        # the thread count equals the task count, so a large job can easily
        # exhaust resources, and the tasks do not finish in order.
        # (See the thread-pool sketch after this listing for a bounded alternative.)
        t = threading.Thread(target=download, args=(i,))
        t.start()
        crawl_list.append(t)

    # Block the main thread with join()
    for t in crawl_list:
        t.join()  # block the current thread until thread t has finished
    print(time.time() - start)  # 21.61223602294922
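
As the comments above point out, spawning one thread per page does not scale: the thread count grows with the number of tasks. A minimal sketch of a bounded alternative using the standard-library concurrent.futures thread pool, reusing the download function and the imports from the listing above (the pool size of 5 is an arbitrary choice, not something from the original post):

from concurrent.futures import ThreadPoolExecutor

if __name__ == '__main__':
    start = time.time()
    # A fixed-size pool caps how many browser instances run at once,
    # no matter how many page numbers are submitted.
    with ThreadPoolExecutor(max_workers=5) as pool:
        # map submits every page number; consuming the iterator with list()
        # also re-raises any exception a worker hit.
        list(pool.map(download, range(1, 20)))
    print(time.time() - start)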