Scraping multiple pages in Python with BeautifulSoup


Question


I have managed to write code that scrapes data from the first page, but now I am stuck on writing a loop to scrape the next 'n' pages. The code is below.

I would appreciate it if someone could guide/help me write code that scrapes the data from the remaining pages.

Thanks!

from bs4 import BeautifulSoup
import requests
import csv


# Fetch the first page of search results
url = requests.get('https://wsc.nmbe.ch/search?sFamily=Salticidae&fMt=begin&sGenus=&gMt=begin&sSpecies=&sMt=begin&multiPurpose=slsid&sMulti=&mMt=contain&searchSpec=s').text

soup = BeautifulSoup(url, 'lxml')

# Each search result sits in a div with this inline style
elements = soup.find_all('div', style="border-bottom: 1px solid #C0C0C0; padding: 10px 0;")
#print(elements)

csv_file = open('wsc_scrape.csv', 'w')
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['sp_name', 'species_author', 'status', 'family'])


for element in elements:
    # Species name is in the first <i> tag
    sp_name = element.i.text.strip()
    print(sp_name)

    # Status badge is a <span> carrying either a success or error label
    status = element.find('span', class_=['success label', 'error label']).text.strip()
    print(status)

    # The text node right after the <i> tag holds "author | family"
    author_family = element.i.next_sibling.strip().split('|')
    species_author = author_family[0].strip()
    family = author_family[1].strip()
    print(species_author)
    print(family)
    print()

    csv_writer.writerow([sp_name, species_author, status, family])

csv_file.close()

Answer 1:


You have to pass the page= parameter in the URL and iterate over all pages:

from bs4 import BeautifulSoup
import requests
import csv

csv_file = open('wsc_scrape.csv', 'w', encoding='utf-8')
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['sp_name', 'species_author', 'status', 'family'])

# Pages are numbered from 1, so this requests pages 1 through 151
for i in range(151):
    url = requests.get('https://wsc.nmbe.ch/search?page={}&sFamily=Salticidae&fMt=begin&sGenus=&gMt=begin&sSpecies=&sMt=begin&multiPurpose=slsid&sMulti=&mMt=contain&searchSpec=s'.format(i + 1)).text
    soup = BeautifulSoup(url, 'lxml')
    elements = soup.find_all('div', style="border-bottom: 1px solid #C0C0C0; padding: 10px 0;")
    for element in elements:
        sp_name = element.i.text.strip()
        print(sp_name)
        status = element.find('span', class_=['success label', 'error label']).text.strip()
        print(status)
        author_family = element.i.next_sibling.strip().split('|')
        species_author = author_family[0].strip()
        family = author_family[1].strip()
        print(species_author)
        print(family)
        print()
        csv_writer.writerow([sp_name, species_author, status, family])

csv_file.close()
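
If you would rather not hard-code the number of pages, here is a minimal sketch that stops as soon as a page comes back with no result divs. It assumes the site simply returns an empty result list past the last page, and the BASE_URL name is my own:

import requests
import csv
from bs4 import BeautifulSoup

# Assumed base URL (same query string as above, page number left open)
BASE_URL = ('https://wsc.nmbe.ch/search?page={}&sFamily=Salticidae&fMt=begin'
            '&sGenus=&gMt=begin&sSpecies=&sMt=begin&multiPurpose=slsid'
            '&sMulti=&mMt=contain&searchSpec=s')

with open('wsc_scrape.csv', 'w', encoding='utf-8', newline='') as csv_file:
    csv_writer = csv.writer(csv_file)
    csv_writer.writerow(['sp_name', 'species_author', 'status', 'family'])
    page = 1
    while True:
        soup = BeautifulSoup(requests.get(BASE_URL.format(page)).text, 'lxml')
        elements = soup.find_all('div', style="border-bottom: 1px solid #C0C0C0; padding: 10px 0;")
        if not elements:
            break  # assumption: an empty page means we are past the last page
        for element in elements:
            sp_name = element.i.text.strip()
            status = element.find('span', class_=['success label', 'error label']).text.strip()
            author_family = element.i.next_sibling.strip().split('|')
            csv_writer.writerow([sp_name, author_family[0].strip(), status, author_family[1].strip()])
        page += 1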



Answer 2:


I am not entirely sure how your descriptions map to items on the page, but the following shows the principle of the loop and how to extract the info.

import requests
from bs4 import BeautifulSoup as bs
import pandas as pd

n = 4  # number of result pages to scrape
headers = ['Success/Failure', 'Names', 'AuthorInfo', 'Family']
df = pd.DataFrame(columns=headers)

with requests.Session() as s:  # a Session reuses the connection across requests
    for page in range(1, n + 1):
        r = s.get('https://wsc.nmbe.ch/search?sFamily=Salticidae&fMt=begin&sGenus=&gMt=begin&sSpecies=&sMt=begin&multiPurpose=slsid&sMulti=&mMt=contain&searchSpec=s&page={}'.format(page))
        soup = bs(r.content, 'lxml')
        # Status badges carry either a .success or .error class
        failSucceed = [item.text for item in soup.select('.success, .error')]
        # Species names are italicised inside each result div
        names = [item.text for item in soup.select('.ym-gbox div > i')]
        # The text node after each <i> holds "author | family"
        authorInfo = [item.next_sibling for item in soup.select('.ym-gbox div > i')]
        family = [item.split('|')[1].strip() for item in authorInfo]
        # Pass columns=headers so concat lines the frames up on the same columns
        dfCurrent = pd.DataFrame(list(zip(failSucceed, names, authorInfo, family)), columns=headers)
        df = pd.concat([df, dfCurrent])

df = df.reset_index(drop=True)
df.to_csv(r"C:\Users\User\Desktop\test.csv", encoding='utf-8')
print(df)

You can get the number of results pages with the following (the last page number sits in the second-to-last pagination link; quoting the attribute value avoids the invalid \? string escape):

numPages = int(soup.select('[href*="search?page"]')[-2].text)
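
For instance, here is a sketch of my own wiring, reusing the selectors from the snippet above, that reads the page count from the first page and then walks the remaining pages (the base variable name is mine):

import requests
from bs4 import BeautifulSoup as bs

# Assumed base URL with the page number left open
base = ('https://wsc.nmbe.ch/search?sFamily=Salticidae&fMt=begin&sGenus=&gMt=begin'
        '&sSpecies=&sMt=begin&multiPurpose=slsid&sMulti=&mMt=contain&searchSpec=s&page={}')

with requests.Session() as s:
    soup = bs(s.get(base.format(1)).content, 'lxml')
    # Index -2 mirrors the snippet above: the second-to-last pagination
    # link holds the highest page number
    numPages = int(soup.select('[href*="search?page"]')[-2].text)
    for page in range(2, numPages + 1):
        soup = bs(s.get(base.format(page)).content, 'lxml')
        # ... extract failSucceed / names / authorInfo / family as shown above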


Source: https://stackoverflow.com/questions/54861405/scraping-multiple-pages-in-python-with-beautifulsoup
