I want to use a spider's output in a Python script. To do this, I wrote the following code based on another thread.
The problem I am facing is that the function spider_results() only returns a list of the last item over and over again, instead of a list of all the items found. When I run the same spider manually with the scrapy crawl command, I get the desired output. The output of the script, the manual JSON output, and the spider itself are below.
What is wrong with my code?
from scrapy import signals
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from circus.spiders.circus import MySpider

from scrapy.signalmanager import dispatcher


def spider_results():
    results = []

    def crawler_results(signal, sender, item, response, spider):
        results.append(item)

    dispatcher.connect(crawler_results, signal=signals.item_passed)

    process = CrawlerProcess(get_project_settings())
    process.crawl(MySpider)
    process.start()  # the script will block here until the crawling is finished
    return results


if __name__ == '__main__':
    print(spider_results())
Script output:
[{'away_odds': 1.44,
'away_team': 'Los Angeles Dodgers',
'event_time': datetime.datetime(2019, 6, 8, 2, 15),
'home_odds': 2.85,
'home_team': 'San Francisco Giants',
'last_update': datetime.datetime(2019, 6, 6, 20, 58, 41, 655497),
'league': 'MLB'}, {'away_odds': 1.44,
'away_team': 'Los Angeles Dodgers',
'event_time': datetime.datetime(2019, 6, 8, 2, 15),
'home_odds': 2.85,
'home_team': 'San Francisco Giants',
'last_update': datetime.datetime(2019, 6, 6, 20, 58, 41, 655497),
'league': 'MLB'}, {'away_odds': 1.44,
'away_team': 'Los Angeles Dodgers',
'event_time': datetime.datetime(2019, 6, 8, 2, 15),
'home_odds': 2.85,
'home_team': 'San Francisco Giants',
'last_update': datetime.datetime(2019, 6, 6, 20, 58, 41, 655497),
'league': 'MLB'}]
JSON output with scrapy crawl:
[
{"home_team": "Los Angeles Angels", "away_team": "Seattle Mariners", "event_time": "2019-06-08 02:07:00", "home_odds": 1.58, "away_odds": 2.4, "last_update": "2019-06-06 20:48:16", "league": "MLB"},
{"home_team": "San Diego Padres", "away_team": "Washington Nationals", "event_time": "2019-06-08 02:10:00", "home_odds": 1.87, "away_odds": 1.97, "last_update": "2019-06-06 20:48:16", "league": "MLB"},
{"home_team": "San Francisco Giants", "away_team": "Los Angeles Dodgers", "event_time": "2019-06-08 02:15:00", "home_odds": 2.85, "away_odds": 1.44, "last_update": "2019-06-06 20:48:16", "league": "MLB"}
]
MySpider:
from scrapy.spiders import Spider
from ..items import MatchItem
import json
import datetime
import dateutil.parser

class MySpider(Spider):
    name = 'first_spider'

    start_urls = ["https://websiteXYZ.com"]

    def parse(self, response):
        item = MatchItem()

        timestamp = datetime.datetime.utcnow()

        response_json = json.loads(response.body)

        for event in response_json["el"]:
            for team in event["epl"]:
                if team["so"] == 1: item["home_team"] = team["pn"]
                if team["so"] == 2: item["away_team"] = team["pn"]

            for market in event["ml"]:
                if market["mn"] == "Match result":
                    item["event_time"] = dateutil.parser.parse(market["dd"]).replace(tzinfo=None)
                    for outcome in market["msl"]:
                        if outcome["mst"] == "1": item["home_odds"] = outcome["msp"]
                        if outcome["mst"] == "X": item["draw_odds"] = outcome["msp"]
                        if outcome["mst"] == "2": item["away_odds"] = outcome["msp"]

                if market["mn"] == 'Moneyline':
                    item["event_time"] = dateutil.parser.parse(market["dd"]).replace(tzinfo=None)
                    for outcome in market["msl"]:
                        if outcome["mst"] == "1": item["home_odds"] = outcome["msp"]
                        #if outcome["mst"] == "X": item["draw_odds"] = outcome["msp"]
                        if outcome["mst"] == "2": item["away_odds"] = outcome["msp"]

            item["last_update"] = timestamp
            item["league"] = event["scn"]

            yield item
Edit:
Based on the answer below, I tried the following two scripts:
controller.py
import json
from scrapy import signals
from scrapy.crawler import CrawlerRunner
from twisted.internet import reactor, defer
from betsson_controlled.spiders.betsson import Betsson_Spider
from scrapy.utils.project import get_project_settings

class MyCrawlerRunner(CrawlerRunner):
    def crawl(self, crawler_or_spidercls, *args, **kwargs):
        # keep all items scraped
        self.items = []

        # create crawler (same as in base CrawlerProcess)
        crawler = self.create_crawler(crawler_or_spidercls)

        # handle each item scraped
        crawler.signals.connect(self.item_scraped, signals.item_scraped)

        # create Twisted.Deferred launching crawl
        dfd = self._crawl(crawler, *args, **kwargs)

        # add callback - when crawl is done, call return_items
        dfd.addCallback(self.return_items)
        return dfd

    def item_scraped(self, item, response, spider):
        self.items.append(item)

    def return_items(self, result):
        return self.items

def return_spider_output(output):
    return json.dumps([dict(item) for item in output])

settings = get_project_settings()
runner = MyCrawlerRunner(settings)
spider = Betsson_Spider()
deferred = runner.crawl(spider)
deferred.addCallback(return_spider_output)
reactor.run()
print(deferred)
When I execute controller.py, I get:
<Deferred at 0x7fb046e652b0 current result: '[{"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}, {"home_team": "St. Louis Cardinals", "away_team": "Pittsburgh Pirates", "home_odds": 1.71, "away_odds": 2.19, "league": "MLB"}]'>
Best answer
Recent edit: After reading up on CrawlerProcess vs CrawlerRunner, I realized that you probably want CrawlerProcess. I had to use the runner because I needed klein to be able to use the deferred object. Process expects only Scrapy, whereas the runner expects other scripts/programs to interact with it. Hope this helps.
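A minimal sketch of that difference, following the common-practices pattern from the Scrapy docs (assuming MySpider and the imports from the scripts above):

# CrawlerProcess manages the Twisted reactor for you:
process = CrawlerProcess(get_project_settings())
process.crawl(MySpider)
process.start()  # starts the reactor and stops it when the crawl finishes

# CrawlerRunner leaves the reactor to you - needed when another
# framework (e.g. klein) owns the event loop:
runner = CrawlerRunner(get_project_settings())
d = runner.crawl(MySpider)
d.addBoth(lambda _: reactor.stop())  # stop the reactor once the crawl ends
reactor.run()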
You need to modify your CrawlerRunner/Process and use signals and/or callbacks to pass items from the CrawlerRunner into your script.
How to integrate Flask & Scrapy? If you look at the options in the top answer there, the one with twisted klein and scrapy is an example of what you are looking for, since it does the same thing except sending the items to a klein HTTP server after the crawl. You can set up a similar method with CrawlerRunner to send each item to your script as it is crawled. NOTE: that particular question sends the results to the klein web server after the items are collected; its answer builds an API which collects the results, waits until crawling is done, and returns them as a JSON dump, but you can apply the same method to your situation. The main thing to look at is how CrawlerRunner is subclassed and extended to add the extra functionality.
What you want is a separate script, which imports your spider and extends CrawlerRunner, that you execute directly; it will start your Twisted reactor and launch the crawl using your customized runner.
That said, this problem could probably also be solved in an item pipeline: create a custom item pipeline that passes each item into your script before returning the item.
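For illustration, a minimal collecting pipeline could look like this (an untested sketch; ItemCollectorPipeline and the myproject paths are placeholder names):

# myproject/pipelines.py
class ItemCollectorPipeline:
    items = []  # class-level list the calling script can read once the crawl is done

    def process_item(self, item, spider):
        ItemCollectorPipeline.items.append(item)
        return item

# settings.py - register the pipeline (900 is an arbitrary priority)
ITEM_PIPELINES = {'myproject.pipelines.ItemCollectorPipeline': 900}

And here is the CrawlerProcess version of the signals approach: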
# main.py
import json
from scrapy import signals
from scrapy.crawler import CrawlerProcess
from twisted.internet import reactor, defer  # import we missed
from myproject.spiders.mymodule import MySpiderName
from scrapy.utils.project import get_project_settings

class MyCrawlerProcess(CrawlerProcess):
    def crawl(self, crawler_or_spidercls, *args, **kwargs):
        # keep all items scraped
        self.items = []
        crawler = self.create_crawler(crawler_or_spidercls)
        crawler.signals.connect(self.item_scraped, signals.item_scraped)
        dfd = self._crawl(crawler, *args, **kwargs)
        dfd.addCallback(self.return_items)
        return dfd

    def item_scraped(self, item, response, spider):
        self.items.append(item)

    def return_items(self, result):
        return self.items

def return_spider_output(output):
    return json.dumps([dict(item) for item in output])

process = MyCrawlerProcess()
deferred = process.crawl(MySpiderName)
deferred.addCallback(return_spider_output)
process.start()  # script should block here again, but I'm not sure it will work right without reactor.run()
print(deferred)
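Note that print(deferred) prints the Deferred wrapper, not the result - that is exactly what the <Deferred at 0x...> output from your controller.py shows. Since a fired Deferred runs newly attached callbacks immediately, one way to get at the JSON string itself (a sketch, assuming the crawl has finished by the time process.start() returns):

# after process.start() the crawl is done, so this callback fires at once
# and prints the JSON string produced by return_spider_output
deferred.addCallback(print)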
Again, this code is a guess that I have not tested; I hope it points you in a better direction.
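One more thing worth checking, independent of the runner/process question: both of your pasted outputs repeat a single event, which is consistent with the spider instantiating one MatchItem() before the event loop and yielding that same object on every iteration, so every element of the collected list ends up referencing one item holding the last event's values. A minimal sketch of the fix, moving the instantiation inside the loop (the populate comment stands in for your existing field assignments):

def parse(self, response):
    timestamp = datetime.datetime.utcnow()
    response_json = json.loads(response.body)

    for event in response_json["el"]:
        item = MatchItem()  # fresh item per event, so each yield is a distinct object
        # ... populate item exactly as before ...
        item["last_update"] = timestamp
        item["league"] = event["scn"]
        yield item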
References:
https://docs.scrapy.org/en/latest/topics/signals.html
https://docs.scrapy.org/en/latest/topics/practices.html?highlight=crawlerrunner
https://twistedmatrix.com/documents/16.2.0/core/howto/defer.html
About python - Using Scrapy spider output in a Python script: a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/56816617/