Scrapy 运行多个爬虫的方法（二）：通过 CrawlerRunner 在同一进程中运行

2019-12-06

# 通过CrawlerRunner
import scrapy
from twisted.internet import reactor
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging

class MySpider1(scrapy.Spider):
    """First example spider: starts from the ip138 homepage."""

    name = "Standard"
    # Offsite filter list; empty here, so no domain restriction is
    # configured for this example. (Scrapy only follows links whose
    # domain appears in this list when it is non-empty.)
    allowed_domains = []
    start_urls = ["http://www.ip138.com"]
    ...

class MySpider2(scrapy.Spider):
    """Second example spider (placeholder — fill in name/start_urls)."""

    ...

def _run_all_spiders():
    """Run MySpider1 and MySpider2 in one process, then stop the reactor.

    Unlike ``CrawlerProcess``, ``CrawlerRunner`` does not start or stop
    the Twisted reactor itself, so we must run the reactor manually and
    attach a callback that stops it once every scheduled crawl finishes.
    """
    configure_logging()  # install Scrapy's default logging handlers
    runner = CrawlerRunner()
    runner.crawl(MySpider1)
    runner.crawl(MySpider2)
    # join() returns a Deferred that fires when all crawls complete.
    deferred = runner.join()
    # addBoth: stop the reactor on success AND on failure, so the
    # process never hangs if a spider errors out.
    deferred.addBoth(lambda _: reactor.stop())
    reactor.run()  # blocks until reactor.stop() is called above


if __name__ == "__main__":
    # Guard the entry point so importing this module (e.g. to reuse the
    # spider classes) does not immediately start a crawl and block.
    _run_all_spiders()
