[英]Obtaining correct data using Regex/List
我正在使用正則表達式解析以下代碼(我知道這不理想,但這是另一回事了):
data:{
url: 'stage-team-stat'
},
defaultParams: {
stageId : 9155,
field: 2,
teamId: 26
}
};
正在使用以下代碼(其中var是以上代碼)進行解析:
import re
# DOTALL pattern anchored on the "data: { url: 'stage-team-stat' }" object;
# group(1) lazily captures only the FIRST key/value pair of defaultParams,
# i.e. "stageId : 9155" — teamId is never part of the captured group.
stagematch = re.compile("data:\s*{\s*url:\s*'stage-team-stat'\s*},\s*defaultParams:\s*{\s*(.*?),.*},",re.S)
stagematch2 = re.search(stagematch, var)
if stagematch2 is not None:
    stagematch3 = stagematch2.group(1)
    # "stageId : 9155" -> text after the first ':' -> 9155
    stageid = int(stagematch3.split(':', 1)[1])
    stageid = str(stageid)
    # BUG (the subject of the question): raising maxsplit from 1 to 3 does not
    # change which element index [1] selects — this still yields 9155, and the
    # captured group never contained teamId in the first place.
    teamid = int(stagematch3.split(':', 3)[1])
    teamid = str(teamid)
    print stageid
    print teamid
在此示例中,我希望 stageid 為“9155”,而 teamid 為“32”,但是它們都將返回為“9155”。
誰能看到我在做什么錯?
謝謝
另一種解決方案是不深入正則表達式,而是使用JavaScript代碼解析器解析JavaScript代碼。 使用slimit
示例:
SlimIt是用Python編寫的JavaScript壓縮程序。 它將JavaScript編譯為更緊湊的代碼,以便下載和運行更快。
SlimIt還提供了一個庫,其中包括JavaScript解析器,詞法分析器,漂亮的打印機和樹訪問器。
from slimit import ast
from slimit.parser import Parser
from slimit.visitors import nodevisitor
# Raw JavaScript exactly as scraped from the page; parsed with slimit instead
# of a regular expression.
data = """
var defaultTeamStatsConfigParams = {
data:{
url: 'stage-team-stat'
},
defaultParams: {
stageId : 9155,
field: 2,
teamId: 32
}
};
DataStore.prime('stage-team-stat', defaultTeamStatsConfigParams.defaultParams, [{"RegionId":252,"RegionCode":"gb-eng","TournamentName":"Premier League","TournamentId":2,"StageId":9155,"Field":{"Value":2,"DisplayName":"Overall"},"TeamName":"Manchester United","TeamId":32,"GamesPlayed":4,"Goals":6,"Yellow":7,"Red":0,"TotalPasses":2480,"Possession":247,"AccuratePasses":2167,"AerialWon":61,"AerialLost":49,"Rating":7.01,"DefensiveRating":7.01,"OffensiveRating":6.79,"ShotsConcededIBox":13,"ShotsConcededOBox":21,"TotalTackle":75,"Interceptions":71,"Fouls":54,"WasFouled":46,"TotalShots":49,"ShotsBlocked":9,"ShotsOnTarget":19,"Dribbles":44,"Offsides":3,"Corners":17,"Throws":73,"Dispossesed":36,"TotalClearance":78,"Turnover":0,"Ranking":0}]);
var stageStatsConfig = {
id: 'team-stage-stats',
singular: true,
filter: {
instanceType: WS.Filter,
id: 'team-stage-stats-filter',
categories: { data: [{ value: 'field' }] },
singular: true
},
params: defaultTeamStatsConfigParams,
content: {
instanceType: TeamStageStats,
view: {
renderTo: 'team-stage-stats-content'
}
}
};
var stageStats = new WS.Panel(stageStatsConfig);
stageStats.load();
"""
# Build the slimit AST for the JavaScript source.
parser = Parser()
tree = parser.parse(data)
# Map every assignment node's left-hand identifier to its right-hand value.
# NOTE(review): if the same key is assigned more than once, the last
# occurrence wins; values are the raw token strings, not converted to int.
fields = {getattr(node.left, 'value', ''): getattr(node.right, 'value', '')
for node in nodevisitor.visit(tree)
if isinstance(node, ast.Assign)}
# Python 2 print — expected output: "9155 2 32".
print fields['stageId'], fields['field'], fields['teamId']
打印出 9155 2 32。
在這里,我們遍歷語法樹節點並根據所有分配構造一個字典。 其中我們有stageId
, fields
和teamId
。
這是將解決方案應用於 Scrapy 蜘蛛的方法:
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from slimit import ast
from slimit.parser import Parser
from slimit.visitors import nodevisitor
def get_fields(data):
    """Parse JavaScript source and map every assigned name to its value.

    Walks the slimit syntax tree and, for each ``ast.Assign`` node,
    records the left-hand identifier's value against the right-hand
    value.  Nodes lacking a ``value`` attribute contribute ``''``.
    """
    syntax_tree = Parser().parse(data)
    assignments = {}
    for item in nodevisitor.visit(syntax_tree):
        if isinstance(item, ast.Assign):
            key = getattr(item.left, 'value', '')
            assignments[key] = getattr(item.right, 'value', '')
    return assignments
class ExampleSpider(CrawlSpider):
    # Crawl the WhoScored team-statistics page and extract the inline
    # JavaScript config via get_fields() rather than a regular expression.
    name = "goal2"
    allowed_domains = ["whoscored.com"]
    start_urls = ["http://www.whoscored.com/Teams/32/Statistics/England-Manchester-United"]
    # Wait 5 seconds between requests to be polite to the site.
    download_delay = 5
    # Follow only the statistics page itself; every other site section is denied.
    rules = [Rule(SgmlLinkExtractor(allow=('http://www.whoscored.com/Teams/32/Statistics/England-Manchester-United'),deny=('/News', '/Graphics', '/Articles', '/Live', '/Matches', '/Explanations', '/Glossary', 'ContactUs', 'TermsOfUse', 'Jobs', 'AboutUs', 'RSS'),), follow=False, callback='parse_item')]

    def parse_item(self, response):
        # Extract the page title and the stats <script> block, then parse the
        # JavaScript with get_fields() to pull out the config values.
        sel = Selector(response)
        titles = sel.xpath("normalize-space(//title)")
        myheader = titles.extract()[0]
        # The config lives in the <script> element immediately following the
        # #team-stage-stats div.
        script = sel.xpath('//div[@id="team-stage-stats"]/following-sibling::script/text()').extract()[0]
        script_fields = get_fields(script)
        # Python 2 print of the three extracted values.
        print script_fields['stageId'], script_fields['field'], script_fields['teamId']
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.