Python - BeautifulSoup - Scraped content only being written to first text file, not subsequent files

I am currently using the code below to scrape information from a sports schedule website and output it to text files. With the code I have at the moment, the data prints correctly to the console, and the data from the first URL (https://sport-tv-guide.live/live/darts) is output to a text file as expected.

The problem is that the content from the second URL (https://sport-tv-guide.live/live/boxing/) is not output to the expected text file (the text file is created, but there is no content in it).

The code I am using is as follows:

import requests
import time
from bs4 import BeautifulSoup

def makesoup(url):
    cookies = {'mycountries' : '101,28,3,102,42,10,18,4,2,22', 'user_time_zone': 'Europe/London',  'user_time_zone_id': '1'} 
    r = requests.post(url,  cookies=cookies)
    return BeautifulSoup(r.text,"lxml")
   
def linkscrape(links, savefile):
    baseurl = "https://sport-tv-guide.live"
    urllist = []
    
    for link in links:
        finalurl = (baseurl+ link['href'])
        urllist.append(finalurl)
        # print(finalurl)
        
    for singleurl in urllist:
        soup2=makesoup(url=singleurl)
        g_data=soup2.find_all('div', {'id': 'channelInfo'})
        c_data=soup2.find_all('div', {'class': 'liveOtherStations clearfix'})
    
    with open(savefile ,"w") as text_file:
        
        for match in g_data:
            try:
                hometeam =  match.find_previous('div', class_='cell40 text-center teamName1').text.strip()
                awayteam =  match.find_previous('div', class_='cell40 text-center teamName2').text.strip()
                print("Competitors; ", hometeam +" "+ "vs" +" "+ awayteam)
            except:
                hometeam = "Home Team element not found"
                awayteam = "Away Team element not found"
            try:
                startime =  match.find('div', class_='time full').text.strip()
                print("Time; ", startime) 
            except:
                startime = "Time element not found"
            try:
                event=  match.find('div', class_='title full').text.strip()
                print("Event:",  event)
            except:
                event = "Event element not found"
            try:
                dateandtime = match.find('div', class_='date full').text.strip()
                print("Date:",  dateandtime)
            except:
                dateandtime = "Date not found"
            try:
                sport = match.find('div', class_='text full').text.strip()
                print("Sport:",  sport)
            except:
                sport = "Sport element not found"
            try:
                singlechannel = match.find('div', class_='station full').text.strip()
                print("Main Channel:",  singlechannel)
                print("-----")
            except:
                singlechannel = "Single Channel element not found"
            for channel in c_data:
                try:
                    channels = match.find('div', class_='stationLive active col-wrap')
                    print("Extra Channels:",  channel.text)
                except:
                    channels = "No channels found"
                    print(channels)
                print("-------")
                
                text_file.writelines("__**Sport:**__" +':' + ' '+ sport +" \n"+"__**Competitors:**__" +':' + ' '+ hometeam + awayteam + event+" \n"+"__**Match Date:**__" +':' + ' ' +dateandtime +" \n"+"__**Match Time:**__"+':' + ' ' +startime +" \n"+ "__**Main Channel**__"+':' + ' '+singlechannel+" \n" + "__**Channels**__"+':' + ' '+channel.text+" \n"+'-' *20 + " \n")


            
def matches():
    
    dict = {"https://sport-tv-guide.live/live/darts/":"/home/brendan/Desktop/testing,txt",  
"https://sport-tv-guide.live/live/boxing/":"/home/brendan/Desktop/boxing.txt"}

    for key,  value  in dict.items():
        soup=makesoup(url = key)
        linkscrape(links= soup.find_all('a', {'class': 'article flag',  'href' : True}) , savefile = value)
        
matches()

The image below shows the output I am printing to the console, and it displays correctly:

[screenshot: console output]

I think there may be an issue with the placement of the with block used to open the text file, causing the file to be created but the actual .writelines call failing to run correctly after the first text file has been created successfully. I have tried adjusting all of the code from the with block onwards, but this had no effect on the output.

Unfortunately, I am not sure how to proceed from here.

Thanks to anyone who can help with or resolve this issue.

Found the issue. In your code, there are no extra channels for the boxing URL (https://sport-tv-guide.live/live/boxing/), so control never enters the inner loop and no output is written to the file.
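
In other words, open(..., "w") creates and truncates the file unconditionally, but the writelines call sits inside "for channel in c_data:", which iterates zero times when find_all returns an empty list. A minimal sketch of that failure mode, using an empty list in place of the real find_all result:

# c_data stands in for soup2.find_all('div', {'class': 'liveOtherStations clearfix'}),
# which comes back empty for the boxing page.
c_data = []

with open("boxing.txt", "w") as text_file:  # the file is created here...
    for channel in c_data:                  # ...but this body runs zero times
        text_file.write("some record \n")   # so nothing is ever written

# Result: boxing.txt exists but stays empty.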

You can collect all the extra channels in a list first and then write to the file.
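
Here is the pattern in isolation, with write_record as a hypothetical stand-in for the long writelines call; the full revised script follows after it.

# c_data stands in for the find_all result; empty here to show the fallback path.
c_data = []

def write_record(extra_channel):
    # hypothetical stand-in for the long text_file.writelines(...) call
    print("would write a record with extra channel:", repr(extra_channel))

extra_channels = [channel.text for channel in c_data]  # may be empty

if extra_channels:
    for channel in extra_channels:  # one record per extra channel
        write_record(channel)
else:
    write_record("")  # still write the match record, just without extras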

import requests
import time
from bs4 import BeautifulSoup

def makesoup(url):
    cookies = {'mycountries' : '101,28,3,102,42,10,18,4,2,22', 'user_time_zone': 'Europe/London',  'user_time_zone_id': '1'} 
    r = requests.post(url,  cookies=cookies)
    return BeautifulSoup(r.text,"lxml")
   
def linkscrape(links, savefile):
    baseurl = "https://sport-tv-guide.live"
    urllist = []
    print(savefile)
    for link in links:
        finalurl = (baseurl+ link['href'])
        urllist.append(finalurl)
        # print(finalurl)
        
    for singleurl in urllist:
        soup2=makesoup(url=singleurl)
        g_data=soup2.find_all('div', {'id': 'channelInfo'})
        c_data=soup2.find_all('div', {'class': 'liveOtherStations clearfix'})
    
    with open(savefile ,"w") as text_file:
        
        for match in g_data:
            try:
                hometeam =  match.find_previous('div', class_='cell40 text-center teamName1').text.strip()
                awayteam =  match.find_previous('div', class_='cell40 text-center teamName2').text.strip()
                print("Competitors; ", hometeam +" "+ "vs" +" "+ awayteam)
            except:
                hometeam = "Home Team element not found"
                awayteam = "Away Team element not found"
            try:
                startime =  match.find('div', class_='time full').text.strip()
                print("Time; ", startime) 
            except:
                startime = "Time element not found"
            try:
                event=  match.find('div', class_='title full').text.strip()
                print("Event:",  event)
            except:
                event = "Event element not found"
            try:
                dateandtime = match.find('div', class_='date full').text.strip()
                print("Date:",  dateandtime)
            except:
                dateandtime = "Date not found"
            try:
                sport = match.find('div', class_='text full').text.strip()
                print("Sport:",  sport)
            except:
                sport = "Sport element not found"
            try:
                singlechannel = match.find('div', class_='station full').text.strip()
                print("Main Channel:",  singlechannel)
                print("-----")
            except:
                singlechannel = "Single Channel element not found"
            
            extra_channels = []  # collect this match's extra channels before writing anything

            for channel in c_data:
                try:
                    channels = match.find('div', class_='stationLive active col-wrap')
                    print("Extra Channels:",  channel.text)
                    extra_channels.append(channel.text)
                except:
                    channels = "No channels found"
                    print(channels)
                    extra_channels.append(channel.text)
                print("-------")
            
            if extra_channels:  # write one record per extra channel
                for channel in extra_channels:    
                    text_file.writelines("__**Sport:**__" +':' + ' '+ sport +" \n"+"__**Competitors:**__" +':' + ' '+ hometeam + awayteam + event+" \n"+"__**Match Date:**__" +':' + ' ' +dateandtime +" \n"+"__**Match Time:**__"+':' + ' ' +startime +" \n"+ "__**Main Channel**__"+':' + ' '+singlechannel+" \n" + "__**Channels**__"+':' + ' '+channel+" \n"+'-' *20 + " \n")
            else:  # no extra channels: still write the match record once
                text_file.writelines("__**Sport:**__" +':' + ' '+ sport +" \n"+"__**Competitors:**__" +':' + ' '+ hometeam + awayteam + event+" \n"+"__**Match Date:**__" +':' + ' ' +dateandtime +" \n"+"__**Match Time:**__"+':' + ' ' +startime +" \n"+ "__**Main Channel**__"+':' + ' '+singlechannel+" \n" + "__**Channels**__"+':' + " \n"+'-' *20 + " \n")
            


            
def matches():
    
    dict = {"https://sport-tv-guide.live/live/darts/":"testing.txt",  
"https://sport-tv-guide.live/live/boxing/":"boxing.txt"}

    for key,  value  in dict.items():
        soup=makesoup(url = key)
        linkscrape(links= soup.find_all('a', {'class': 'article flag',  'href' : True}) , savefile = value)
        
matches()
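
As a side note, and not part of the fix itself: the long concatenations passed to writelines are easy to get wrong, and writelines does not add newlines, so a plain write works just as well for a single string. A record builder using an f-string, shown purely as a readability suggestion with the same fields and spacing as above, might look like this:

def format_record(sport, hometeam, awayteam, event, dateandtime,
                  startime, singlechannel, channel):
    # Field layout mirrors the writelines call above; note that the original
    # concatenates hometeam, awayteam and event with no separator between them.
    return (
        f"__**Sport:**__: {sport} \n"
        f"__**Competitors:**__: {hometeam}{awayteam}{event} \n"
        f"__**Match Date:**__: {dateandtime} \n"
        f"__**Match Time:**__: {startime} \n"
        f"__**Main Channel**__: {singlechannel} \n"
        f"__**Channels**__: {channel} \n"
        f"{'-' * 20} \n"
    )

# Inside the loop, the call would then be:
# text_file.write(format_record(sport, hometeam, awayteam, event,
#                               dateandtime, startime, singlechannel, channel))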
