Scraping wsj.com

╄→尐↘猪︶ㄣ 提交于 2020-12-27 03:08:33

问题


I wanted to scrape some data from wsj.com and print it. The actual website is: https://www.wsj.com/market-data/stocks?mod=md_home_overview_stk_main and the data is NYSE Issues Advancing, Declining and NYSE Share Volume Advancing, Declining.

I tried using beautifulsoup after watching a youtube video but I can't get any of the classes to return a value inside body.

Here is my code:

from bs4 import BeautifulSoup
import requests


# Fetch the WSJ market-data page as raw HTML text.
# NOTE(review): no browser-like User-Agent header is sent, so the server
# likely returns a blocked/error page instead of the real markup — confirm
# by checking the response status code before parsing.
source = requests.get('https://www.wsj.com/market-data/stocks?mod=md_home_overview_stk_main').text

# Parse the returned HTML with the lxml parser.
soup = BeautifulSoup(source, 'lxml')

body = soup.find('body')

# Look up the first <td> carrying these CSS classes.
# NOTE(review): these class names look build-generated, and the market-data
# table appears to be rendered client-side by JavaScript — presumably why
# find() returns None on the static HTML fetched above; verify in the page source.
adv = body.find('td', class_='WSJTables--table__cell--2dzGiO7q WSJTheme--table__cell--1At-VGNg ')


print(adv)

Also while inspecting elements in Network I noticed that this data is also available as a JSON.

Here is the link: https://www.wsj.com/market-data/stocks?id=%7B%22application%22%3A%22WSJ%22%2C%22marketsDiaryType%22%3A%22overview%22%7D&type=mdc_marketsdiary

So I wrote another script to try to parse this data as JSON, but again it's not working.

Here is the code:

import json

import requests

# JSON endpoint observed in the browser's Network tab; the "id" query value
# is a URL-encoded JSON object ({"application":"WSJ","marketsDiaryType":"overview"}).
url = 'https://www.wsj.com/market-data/stocks?id=%7B%22application%22%3A%22WSJ%22%2C%22marketsDiaryType%22%3A%22overview%22%7D&type=mdc_marketsdiary'

# NOTE(review): without browser-like headers the server presumably responds
# with a non-JSON error body, which is why json.loads raises
# JSONDecodeError("Expecting value") here — verify by printing
# requests.get(url).status_code and .text.
response = json.loads(requests.get(url).text)

print(response)

The error I get is:

 File "C:\Users\User\Anaconda3\lib\json\decoder.py", line 355, in raw_decode
    raise JSONDecodeError("Expecting value", s, err.value) from None

JSONDecodeError: Expecting value

I also tried a few different methods from this link and none seem to work.

Can you please set me on the right path and show me how to scrape this data?


回答1:


import requests
import json


# Query-string parameters for WSJ's markets-diary JSON endpoint. Passing the
# raw JSON string as the "id" value lets requests handle the URL encoding
# (equivalent to the %7B...%7D form seen in the browser's Network tab).
params = {
    'id': '{"application":"WSJ","marketsDiaryType":"overview"}',
    'type': 'mdc_marketsdiary'
}

# A browser-like User-Agent is required; without it the server rejects the
# request and the body is not JSON.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0"
}

response = requests.get(
    "https://www.wsj.com/market-data/stocks",
    params=params, headers=headers, timeout=30)
# Fail loudly on an HTTP error status instead of crashing later inside .json().
response.raise_for_status()
r = response.json()

# Pretty-print the parsed payload for inspection.
data = json.dumps(r, indent=4)

print(data)

Output:

{
    "id": "{\"application\":\"WSJ\",\"marketsDiaryType\":\"overview\"}",
    "type": "mdc_marketsdiary",
    "data": {
        "instrumentSets": [
            {
                "headerFields": [
                    {
                        "value": "name",
                        "label": "Issues"
                    }
                ],
                "instruments": [
                    {
                        "name": "Advancing",
                        "NASDAQ": "169",
                        "NYSE": "69"
                    },
                    {
                        "name": "Declining",
                        "NASDAQ": "3,190",
                        "NYSE": "2,973"
                    },
                    {
                        "name": "Unchanged",
                        "NASDAQ": "24",
                        "NYSE": "10"
                    },
                    {
                        "name": "Total",
                        "NASDAQ": "3,383",
                        "NYSE": "3,052"
                    }
                ]
            },
            {
                "headerFields": [
                    {
                        "value": "name",
                        "label": "Issues At"
                    }
                ],
                "instruments": [
                    {
                        "name": "New Highs",
                        "NASDAQ": "53",
                        "NYSE": "14"
                    },
                    {
                        "name": "New Lows",
                        "NASDAQ": "1,406",
                        "NYSE": "1,620"
                    }
                ]
            },
            {
                "headerFields": [
                    {
                        "value": "name",
                        "label": "Share Volume"
                    }
                ],
                "instruments": [
                    {
                        "name": "Total",
                        "NASDAQ": "4,454,691,895",
                        "NYSE": "7,790,947,818"
                    },
                    {
                        "name": "Advancing",
                        "NASDAQ": "506,192,012",
                        "NYSE": "219,412,232"
                    },
                    {
                        "name": "Declining",
                        "NASDAQ": "3,948,035,191",
                        "NYSE": "7,570,377,893"
                    },
                    {
                        "name": "Unchanged",
                        "NASDAQ": "464,692",
                        "NYSE": "1,157,693"
                    }
                ]
            }
        ],
        "timestamp": "4:00 PM EDT 3/09/20"
    },
    "hash": "{\"id\":\"{\\\"application\\\":\\\"WSJ\\\",\\\"marketsDiaryType\\\":\\\"overview\\\"}\",\"type\":\"mdc_marketsdiary\",\"data\":{\"instrumentSets\":[{\"headerFields\":[{\"value\":\"name\",\"label\":\"Issues\"}],\"instruments\":[{\"name\":\"Advancing\",\"NASDAQ\":\"169\",\"NYSE\":\"69\"},{\"name\":\"Declining\",\"NASDAQ\":\"3,190\",\"NYSE\":\"2,973\"},{\"name\":\"Unchanged\",\"NASDAQ\":\"24\",\"NYSE\":\"10\"},{\"name\":\"Total\",\"NASDAQ\":\"3,383\",\"NYSE\":\"3,052\"}]},{\"headerFields\":[{\"value\":\"name\",\"label\":\"Issues At\"}],\"instruments\":[{\"name\":\"New Highs\",\"NASDAQ\":\"53\",\"NYSE\":\"14\"},{\"name\":\"New Lows\",\"NASDAQ\":\"1,406\",\"NYSE\":\"1,620\"}]},{\"headerFields\":[{\"value\":\"name\",\"label\":\"Share Volume\"}],\"instruments\":[{\"name\":\"Total\",\"NASDAQ\":\"4,454,691,895\",\"NYSE\":\"7,790,947,818\"},{\"name\":\"Advancing\",\"NASDAQ\":\"506,192,012\",\"NYSE\":\"219,412,232\"},{\"name\":\"Declining\",\"NASDAQ\":\"3,948,035,191\",\"NYSE\":\"7,570,377,893\"},{\"name\":\"Unchanged\",\"NASDAQ\":\"464,692\",\"NYSE\":\"1,157,693\"}]}],\"timestamp\":\"4:00 PM EDT 3/09/20\"}}"
}

Note: You can access it as dict print(r.keys()).




回答2:


You need to add a User-Agent header to the request so that the server does not respond with a 404 error.

import json

import pandas as pd
from urllib.request import Request, urlopen

url = 'https://www.wsj.com/market-data/stocks?id=%7B%22application%22%3A%22WSJ%22%2C%22marketsDiaryType%22%3A%22overview%22%7D&type=mdc_marketsdiary'
# A browser-like User-Agent header is required; without it the server returns a 404.
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:63.0) Gecko/20100101 Firefox/63.0'}
# Bug fix: the original called urllib.request.Request() but only imported
# urlopen, raising NameError; Request is now imported explicitly (and json,
# which the original also forgot to import).
req = Request(url=url, headers=headers)
with urlopen(req) as response:
    page_html = response.read()

# Collect every "instruments" table from the payload, then concatenate once.
# (DataFrame.append was deprecated and removed in pandas 2.0; pd.concat is
# the supported replacement and avoids quadratic re-copying.)
frames = [
    pd.DataFrame(instruments)
    for instrument_set in json.loads(page_html).get('data', {}).get('instrumentSets', [])
    for key, instruments in instrument_set.items()
    if key == 'instruments'
]
df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
df = df.rename(columns={'name': 'Issues'})
df

Result:



来源:https://stackoverflow.com/questions/60606633/scraping-wsj-com

易学教程内所有资源均来自网络或用户发布的内容,如有违反法律规定的内容欢迎反馈
该文章没有解决你所遇到的问题?点击提问,说说你的问题,让更多的人一起探讨吧!