简体   繁体   中英

How to use Puppeteer to scrape a reddit page?

I am trying to learn to use Puppeteer to scrape a reddit page. The new reddit has dynamically added content and infinite scrolling. I am getting pretty inconsistent results from code and having a hard time debugging and figuring out how to make this work.

Main server.js file, not much going on here.

'use strict';

const express = require('express');
const cors = require('cors');
const app = express();
const cheerio = require('./redditScraper');

app.use(express.json());
app.use(
    cors({
        origin: ['http://localhost:3000']
    })
);

app.get('/', (req, res) => {
    let { dynamicScraper } = cheerio;

    dynamicScraper()
        .then(html => {
            console.log('data was sent');
            res.json(html);
        })
        .catch(err => {
            console.log(err);
        });
});

app.listen(process.env.PORT || 8080, () => {
    console.log(`Your app is listening on port ${process.env.PORT || 8080}`);
});

File with the scraper stuff

'use strict';

const rp = require('request-promise');
const $ = require('cheerio');
const puppeteer = require('puppeteer');
const url2 = 'https://www.reddit.com/r/GameDeals/';



const cheerio = {
    dynamicScraper: function() {
       return puppeteer
            .launch()
            .then(browser => {
                return browser.newPage();
            })
            .then(page => {
                return page.goto(url2)
                    .then(() => {
                        //attempting to scroll through page to cause loading in more elements, doesn't seem to work
                        page.evaluate(() => {
                            window.scrollBy(0, window.innerHeight);
                        })
                        return page.content()
                    });
            })
            .then(html => {
                //should log the the first post's a tag's href value
                console.log($('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a', html).attr('href'));

                const urls = [];

                //should be the total number of a tag's across all posts
                const numLinks = $('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a', html).attr('href').length;

                const links = $('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a', html);

                //was using numLinks as increment counter but was getting undefined, as the pages only seems to load inconsistent between 10-20 elements
                for (let i=0; i<10; i++) {
                    urls.push(links[i].attribs.href);
                }

                console.log('start of urls:', urls);
                console.log('scraped urls:', urls.length);
                console.log('intended number of urls to be scraped:', numLinks);
                // console.log($('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a', html).length);
            })
            .catch(err => console.log(err));
    }

}

module.exports = cheerio;

The above code currently works, however as you can see from the comments, I have the counter hard coded to 10, which is obviously not the total number of <a href=#> on the page.

Here is the output for the above:

[nodemon] starting `node server.js`
Your app is listening on port 8080
https://www.gamebillet.com/garfield-kart
start of urls: [ 'https://www.gamebillet.com/garfield-kart',
  'https://www.humblebundle.com/store/deep-rock-galactic?hmb_source=humble_home&hmb_medium=product_tile&hmb_campaign=mosaic_section_1_layout_index_9_layout_type_twos_tile_index_1',  'https://www.greenmangaming.com/games/batman-arkham-asylum-game-of-the-year/',
  'https://www.humblebundle.com/store/ftl-faster-than-light',
  'https://www.greenmangaming.com/vip/vip-deals/',
  'https://store.steampowered.com/app/320300/',
  'https://store.steampowered.com/app/356650/Deaths_Gambit/',
  'https://www.chrono.gg/?=Turmoil',
  'https://www.fanatical.com/en/game/slain',
  'https://freebies.indiegala.com/super-destronaut/?dev_id=freebies' ]
scraped urls: 10
numLinks: 40
data was sent

Here is the output when the for loop is changed to numlinks

for (let i=0; i<numLinks; i++) {
    urls.push(links[i].attribs.href);
}
[nodemon] starting `node server.js`
Your app is listening on port 8080
https://www.gamebillet.com/garfield-kart
TypeError: Cannot read property 'attribs' of undefined
    at puppeteer.launch.then.then.then.html (/file.js:49:40)
    at process._tickCallback (internal/process/next_tick.js:68:7)
data was sent

I hope this isn't too much of a mess to read. I would appreciate any help. Thank you.

Update/edit:

I am trying to implement the async way, but I'm not sure how to return the value to be used in the route handler?

    dynamicScraper: function() {
        async function f() {
            const browser = await puppeteer.launch();
            const [page] = await browser.pages();

            await page.goto(url2, { waitUntil: 'networkidle0' });
            const links = await page.evaluate(async () => {
                const scrollfar = document.body.clientHeight;
                console.log(scrollfar); //trying to find the height
                window.scrollBy(0, scrollfar);
                await new Promise(resolve => setTimeout(resolve, 5000)); 
                return [...document.querySelectorAll('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a')]
                    .map((el) => el.href);
            });
            console.log(links, links.length);

            await browser.close();
            //how do I return the value to pass to the route handler?
            return (links);
        };
        return(f());
    }

I am getting from the console.log .

Your app is listening on port 8080
[ 'https://www.nintendo.com/games/detail/celeste-switch',
  'https://store.steampowered.com/app/460790/Bayonetta/',
  'https://www.greenmangaming.com/de/vip/vip-deals/']

But the response from server to client is an empty object on browser {}

Update/edit 2:

Nevermind, figured it out needed to handle the promise from the async function.

dynamicScraper().then((output) => {
        res.json(output);
    });

You have several problems in your code:

page.goto(url2)

By default, page.goto will only wait for the load event. Changing it to page.goto(url2, { waitUntil: 'networkidle0' }) will wait until all requests are finished.

page.evaluate(() => {
    window.scrollBy(0, window.innerHeight);
})

You are missing an await in front of that statement (or you need to embed it into your promise flow). In addition, you are not scrolling to the end of the page, but only as much as your window height is. You should use document.body.clientHeight to scroll to the end of the page.

Additionally, you have to wait some time (or for some selector that you are expecting) after scrolling. You could use this code to wait for one second so that the page has enough time to load more content:

new Promise(resolve => setTimeout(resolve, 5000))

Regarding your general idea, I would recommend only using puppeteer instead of using puppeteer to navigate and then extracting all data and putting it into cheerio. If you only use puppeteer your code could be as simple as this (you would still have to wrap it into a function):

const puppeteer = require('puppeteer');

(async () => {
    const browser = await puppeteer.launch();
    const [page] = await browser.pages();

    await page.goto('https://www.reddit.com/r/GameDeals/', { waitUntil: 'networkidle0' });
    const links = await page.evaluate(async () => {
        window.scrollBy(0, document.body.clientHeight);
        await new Promise(resolve => setTimeout(resolve, 5000)); // wait for some time, you might need to figure out a good value for this yourself
        return [...document.querySelectorAll('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a')]
            .map((el) => el.href);
    });
    console.log(links, links.length);

    await browser.close();
})();

The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address.Any question please contact:yoyou2525@163.com.

 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM