
How to use Puppeteer to scrape a reddit page?

I'm trying to learn how to use Puppeteer to scrape a reddit page. The new reddit loads content dynamically and uses infinite scrolling. I'm getting very inconsistent results from my code, and I'm having a hard time debugging it and figuring out how to make it work.

The main server.js file. There's not much going on here.

'use strict';

const express = require('express');
const cors = require('cors');
const app = express();
const cheerio = require('./redditScraper');

app.use(express.json());
app.use(
    cors({
        origin: ['http://localhost:3000']
    })
);

app.get('/', (req, res) => {
    let { dynamicScraper } = cheerio;

    dynamicScraper()
        .then(html => {
            console.log('data was sent');
            res.json(html);
        })
        .catch(err => {
            console.log(err);
        });
});

app.listen(process.env.PORT || 8080, () => {
    console.log(`Your app is listening on port ${process.env.PORT || 8080}`);
});

The file with the scraper stuff:

'use strict';

const rp = require('request-promise');
const $ = require('cheerio');
const puppeteer = require('puppeteer');
const url2 = 'https://www.reddit.com/r/GameDeals/';



const cheerio = {
    dynamicScraper: function() {
       return puppeteer
            .launch()
            .then(browser => {
                return browser.newPage();
            })
            .then(page => {
                return page.goto(url2)
                    .then(() => {
                        //attempting to scroll through the page to trigger loading of more elements, doesn't seem to work
                        page.evaluate(() => {
                            window.scrollBy(0, window.innerHeight);
                        })
                        return page.content()
                    });
            })
            .then(html => {
                //should log the first post's a tag href value
                console.log($('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a', html).attr('href'));

                const urls = [];

                //should be the total number of a tags across all posts
                const numLinks = $('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a', html).attr('href').length;

                const links = $('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a', html);

                //was using numLinks as the loop limit but was getting undefined, as the page only seems to load between 10-20 elements inconsistently
                for (let i=0; i<10; i++) {
                    urls.push(links[i].attribs.href);
                }

                console.log('start of urls:', urls);
                console.log('scraped urls:', urls.length);
                console.log('intended number of urls to be scraped:', numLinks);
                // console.log($('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a', html).length);
            })
            .catch(err => console.log(err));
    }

}

module.exports = cheerio;

The above code currently works, but as you can see from the comments, I have the counter hardcoded to 10, which is obviously not the total number of <a href=#> on the page.

Here is the output from the above:

[nodemon] starting `node server.js`
Your app is listening on port 8080
https://www.gamebillet.com/garfield-kart
start of urls: [ 'https://www.gamebillet.com/garfield-kart',
  'https://www.humblebundle.com/store/deep-rock-galactic?hmb_source=humble_home&hmb_medium=product_tile&hmb_campaign=mosaic_section_1_layout_index_9_layout_type_twos_tile_index_1',  'https://www.greenmangaming.com/games/batman-arkham-asylum-game-of-the-year/',
  'https://www.humblebundle.com/store/ftl-faster-than-light',
  'https://www.greenmangaming.com/vip/vip-deals/',
  'https://store.steampowered.com/app/320300/',
  'https://store.steampowered.com/app/356650/Deaths_Gambit/',
  'https://www.chrono.gg/?=Turmoil',
  'https://www.fanatical.com/en/game/slain',
  'https://freebies.indiegala.com/super-destronaut/?dev_id=freebies' ]
scraped urls: 10
numLinks: 40
data was sent

Here is the output when the for loop is changed to use numLinks:

for (let i=0; i<numLinks; i++) {
    urls.push(links[i].attribs.href);
}
[nodemon] starting `node server.js`
Your app is listening on port 8080
https://www.gamebillet.com/garfield-kart
TypeError: Cannot read property 'attribs' of undefined
    at puppeteer.launch.then.then.then.html (/file.js:49:40)
    at process._tickCallback (internal/process/next_tick.js:68:7)
data was sent
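
The TypeError above is most likely because .attr('href') returns a single string, so numLinks (40) is the character count of that first URL rather than the count of matched links, and indexing past the posts that actually loaded gives undefined. The commented-out line in the scraper already hints at the fix; a minimal sketch of counting the elements instead:

// Count the matched <a> elements rather than the length of one href string.
const links = $('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a', html);
const numLinks = links.length; // number of links actually present in the HTML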

I hope this isn't too messy to read. I would appreciate any help. Thank you.

Update/Edit:

I'm trying to implement the async approach, but I'm not sure how to return the value so it can be used in the route handler?

    dynamicScraper: function() {
        async function f() {
            const browser = await puppeteer.launch();
            const [page] = await browser.pages();

            await page.goto(url2, { waitUntil: 'networkidle0' });
            const links = await page.evaluate(async () => {
                const scrollfar = document.body.clientHeight;
                console.log(scrollfar); //trying to find the height
                window.scrollBy(0, scrollfar);
                await new Promise(resolve => setTimeout(resolve, 5000)); 
                return [...document.querySelectorAll('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a')]
                    .map((el) => el.href);
            });
            console.log(links, links.length);

            await browser.close();
            //how do I return the value to pass to the route handler?
            return (links);
        };
        return(f());
    }

This is what I get from console.log:

Your app is listening on port 8080
[ 'https://www.nintendo.com/games/detail/celeste-switch',
  'https://store.steampowered.com/app/460790/Bayonetta/',
  'https://www.greenmangaming.com/de/vip/vip-deals/']

But the response sent from the server to the client is an empty object {} in the browser.

Update/Edit 2:

Never mind, I figured out that I needed to handle the promise returned by the async function:

dynamicScraper().then((output) => {
        res.json(output);
    });
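
Equivalently, the route handler itself could be made async and await the scraper; this is just a sketch of that alternative (the error handling shown here is an addition, not part of the original code):

app.get('/', async (req, res) => {
    try {
        // dynamicScraper returns a promise, so it can simply be awaited here
        const output = await dynamicScraper();
        res.json(output);
    } catch (err) {
        console.log(err);
        res.status(500).json({ error: 'scrape failed' });
    }
});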

There are several problems in your code:

page.goto(url2)

By default, page.goto will only wait for the load event. Changing it to page.goto(url2, { waitUntil: 'networkidle0' }) will wait until all requests have finished.

page.evaluate(() => {
    window.scrollBy(0, window.innerHeight);
})

You are missing an await before that statement (or you need to embed it into your promise flow). In addition, you are not scrolling to the end of the page, only by the height of your window; you should use document.body.clientHeight to scroll to the end of the page.
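
Kept in the promise style of the question, that part of the chain could look roughly like this (only the goto options, the await on evaluate, and the scroll distance change):

.then(page => {
    return page.goto(url2, { waitUntil: 'networkidle0' })
        .then(() => page.evaluate(() => {
            // scroll by the full document height, not just the viewport height
            window.scrollBy(0, document.body.clientHeight);
        }))
        .then(() => page.content());
})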

You also have to wait for some time after scrolling (or wait for some selector you expect to appear). You can use this code to pause so the page has enough time to load more content:

new Promise(resolve => setTimeout(resolve, 5000))
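
Waiting for a selector you expect, rather than a fixed delay, would look something like this (the class name is simply the one already used in your selectors, and this only guarantees that the first batch of posts has rendered):

// Resolve as soon as at least one post container exists, or fail after 10 seconds.
await page.waitForSelector('.scrollerItem', { timeout: 10000 });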

Regarding your general idea, I would recommend using only puppeteer instead of using puppeteer for the navigation and then extracting all the data with cheerio. If you use only puppeteer, your code could be as simple as this (you would still have to wrap it into a function):

const puppeteer = require('puppeteer');

(async () => {
    const browser = await puppeteer.launch();
    const [page] = await browser.pages();

    await page.goto('https://www.reddit.com/r/GameDeals/', { waitUntil: 'networkidle0' });
    const links = await page.evaluate(async () => {
        window.scrollBy(0, document.body.clientHeight);
        await new Promise(resolve => setTimeout(resolve, 5000)); // wait for some time, you might need to figure out a good value for this yourself
        return [...document.querySelectorAll('.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a')]
            .map((el) => el.href);
    });
    console.log(links, links.length);

    await browser.close();
})();
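
Since the subreddit loads posts through infinite scroll, a single scroll only pulls in one extra batch. If you need more posts, the evaluate body could scroll repeatedly until the post count stops growing; this is only a sketch, and the pass count and delay are guesses you would have to tune:

const links = await page.evaluate(async () => {
    const selector = '.scrollerItem div:nth-of-type(2) article div div:nth-of-type(3) a';
    let previousCount = 0;
    // Scroll a few times, stopping early once no new posts appear.
    for (let i = 0; i < 5; i++) {
        window.scrollBy(0, document.body.clientHeight);
        await new Promise(resolve => setTimeout(resolve, 2000));
        const currentCount = document.querySelectorAll(selector).length;
        if (currentCount === previousCount) break;
        previousCount = currentCount;
    }
    return [...document.querySelectorAll(selector)].map((el) => el.href);
});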
