scrapy多爬虫运行方法1

2019-12-06

# Run multiple spiders in a single process via CrawlerProcess
import scrapy
from scrapy.crawler import CrawlerProcess

class MySpider1(scrapy.Spider):
    """First spider definition; crawling callbacks elided (``...``)."""

    # Your first spider definition
    # Unique name Scrapy uses to identify this spider.
    name = "Standard"
    # Only sites whose domain is listed here are crawled.
    # NOTE(review): the list is empty — presumably offsite filtering is
    # intentionally left off; confirm against the crawl settings.
    allowed_domains = []
    # Initial URL(s) requested when the crawl starts.
    start_urls = ["http://www.ip138.com"]
    ...

class MySpider2(scrapy.Spider):
    """Second spider definition; entire body elided (``...``)."""

    # Your second spider definition
    ...

def main() -> None:
    """Run both spiders concurrently inside one CrawlerProcess.

    ``CrawlerProcess`` starts a Twisted reactor; ``start()`` blocks
    until every scheduled crawl has finished.
    """
    process = CrawlerProcess()
    # Schedule both spiders before starting the reactor so they run
    # in the same process.
    process.crawl(MySpider1)
    process.crawl(MySpider2)
    process.start()


if __name__ == "__main__":
    # Guard the entry point: the original ran the crawl as an
    # unguarded module-level side effect, which would also fire on import.
    main()
