Why does this code generate multiple files? I want 1 file with all entries in it

Submitted by 扶醉桌前 on 2020-02-06 17:52:30

Question


I'm trying to work with both BeautifulSoup and XPath and was using the following code, but now I'm getting one file per URL instead of one file for all the URLs, as before.

I just moved the URL list over to being read from a CSV and added the parsing of the URL and response. But when I run this now I get a lot of individual files, and in some cases one file may actually contain data from two scraped pages. So do I need to move my file saving out (change the indentation)?

import scrapy
import requests
from DSG2.items import Dsg2Item
from bs4 import BeautifulSoup
import time
import datetime
import csv

class DsgSpider(scrapy.Spider):
    name = "dsg"

    def start_requests(self):
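        # read the start URLs from the first column of a CSV file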
        urlLinks = []
        with open('dsgLinks.csv','r') as csvf:
            urls = csv.reader(csvf)
            for urlLink in urls:
                urlLinks.append(urlLink)

        for url in urlLinks:
            yield scrapy.Request(url=url[0], callback=self.parse)

    def parse(self, response):
        dets = Dsg2Item()
        now = time.mktime(datetime.datetime.now().timetuple())
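        # note: Scrapy has already downloaded this page, so response.text could
        # be parsed directly instead of re-fetching the URL with requests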
        r = requests.get(response.url, timeout=5)

        html = r.text
        soup = BeautifulSoup(html, "html.parser")

        dets['style'] = " STYLE GOES HERE "
        dets['brand'] = " BRAND GOES HERE "                    
        dets['description'] = " DESCRIPTION GOES HERE "
        dets['price'] = " PRICE GOES HERE "
        dets['compurl'] = response.url  # response.url is a string; [0] would take only its first character
        dets['reviewcount'] = " REVIEW COUNT GOES HERE "
        dets['reviewrating'] = " RATING COUNT GOES HERE "
        dets['model'] = " MODEL GOES HERE "
        dets['spechandle'] = " HANDLE GOES HERE "
        dets['specbladelength'] = " BLADE LENGTH GOES HERE "
        dets['specoveralllength'] = " OVERALL LENGTH GOES HERE "
        dets['specweight'] = " WEIGHT GOES HERE "
        dets['packsize'] = " PACKSIZE GOES HERE "

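        # product title -> description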
        for h1items in soup.find_all('h1',class_="product-title"):
            strh1item = str(h1items.get_text())
            dets['description']=strh1item.lstrip()

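        # spec bullet list: model, handle, blade length, overall length, weight, pack qty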
        for divitems in soup.find_all('div', class_="product-component"):
            for ulitems in divitems.find_all('ul'):
                for litem in ulitems.find_all('li'):
                    strlitem = str(litem.get_text())
                    if 'Model:' in strlitem:
                        bidx = strlitem.index(':')+1
                        lidx = len(strlitem)
                        dets['model']=strlitem[bidx:lidx].lstrip()

                    elif 'Handle:' in strlitem:
                        bidx = strlitem.index(':')+1
                        lidx = len(strlitem)
                        dets['spechandle']=strlitem[bidx:lidx].lstrip()

                    elif 'Blade Length:' in strlitem:
                        bidx = strlitem.index(':')+1
                        lidx = len(strlitem)
                        dets['specbladelength'] = strlitem[bidx:lidx].lstrip()

                    elif 'Overall Length:' in strlitem:
                        bidx = strlitem.index(':')+1
                        lidx = len(strlitem)
                        dets['specoveralllength'] = strlitem[bidx:lidx].lstrip()

                    elif 'Weight:' in strlitem:
                        bidx = strlitem.index(':')+1
                        lidx = len(strlitem)
                        dets['specweight'] = strlitem[bidx:lidx].lstrip()

                    elif 'Pack Qty:' in strlitem:
                        bidx = strlitem.index(':')+1
                        lidx = len(strlitem)
                        dets['packsize']=strlitem[bidx:lidx].lstrip()             

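        # attribute list: style and brand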
        for litems in soup.find_all('ul', class_="prod-attr-list"):
            for litem in litems.find_all('li'):
                strlitem = str(litem.get_text())
                if 'Style:' in strlitem:
                    bidx = strlitem.index(':')+1
                    lidx = len(strlitem)
                    dets['style']=strlitem[bidx:lidx].lstrip()

                elif 'Brand:' in strlitem:
                    bidx = strlitem.index(':')+1
                    lidx = len(strlitem)
                    dets['brand']=strlitem[bidx:lidx].lstrip()                    

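        # an out-of-stock label is shown where the price would normally be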
        for divitems in soup.find_all('div', class_="outofstock-label"):
            dets['price'] = divitems.text          

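        # in-stock price, formatted as $x,xxx.xx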
        for spanitems in soup.find_all('span',class_="final-price"):
            for spanitem in spanitems.find_all('span',itemprop="price"):
                strspanitem = str(spanitem.get_text())
                dets['price'] = '${:,.2f}'.format(float(strspanitem.lstrip()))

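        # Bazaarvoice review summary: review count and average rating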
        for divitems in soup.find_all('div',id="BVRRSummaryContainer"):
            for spanitem in divitems.find_all('span',class_="bvseo-reviewCount"):
                strspanitem = str(spanitem.get_text())
                dets['reviewcount']=strspanitem.lstrip()
            for spanitem in divitems.find_all('span',class_="bvseo-ratingValue"):
                strspanitem = str(spanitem.get_text())
                dets['reviewrating']=strspanitem.lstrip()

        filename = 'dsg-%s.csv' % str(int(now))
        locallog = open(filename, 'a+')
        locallog.write(','.join(map(str, dets.values())) +"\n")
        locallog.close()

I'd like to fix this code so that it saves all the scraped data into one file, as it did originally.


Answer 1:


You create a new timestamped filename on every call to parse, so each scraped page ends up in its own file (and two pages parsed within the same second share one, which is why some files contain two pages' data):

filename = 'dsg-%s.csv' % str(int(now))

Just replace it with:

filename = 'dsg.csv'
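
For completeness, here is a minimal sketch of the end of parse with the fix applied. Append mode ('a+') already accumulates rows across calls; swapping ','.join for csv.writer is my own suggestion, since it quotes field values that themselves contain commas:

        # end of parse(), fixed: one shared file, appended to on every call
        filename = 'dsg.csv'
        with open(filename, 'a+', newline='') as locallog:
            writer = csv.writer(locallog)
            # csv.writer quotes fields containing commas, which ','.join does not
            writer.writerow([str(v) for v in dets.values()])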


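Alternatively, since the spider already fills a Dsg2Item, you could drop the manual file handling entirely and let Scrapy's feed exports write a single CSV. A sketch, assuming default feed-export settings:

    def parse(self, response):
        # ... populate dets as before, then hand the item to Scrapy
        yield dets

Run the spider with scrapy crawl dsg -o dsg.csv and Scrapy appends every yielded item to one file, with a header row taken from the item's fields.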

Source: https://stackoverflow.com/questions/55925202/why-does-this-code-generate-multiple-files-i-want-1-file-with-all-entries-in-it
