import scrapy


class QuotesSpider(scrapy.Spider):
    name = "quotes"

    def start_requests(self):
        # Scrapy calls start_requests() to generate the initial requests.
        start_urls = [
            "http://quotes.toscrape.com/page/1/",
            "http://quotes.toscrape.com/page/2/",
        ]
        for url in start_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        # Save the raw HTML of each page to a local file named after the page number.
        page = response.url.split("/")[-2]
        filename = 'quotes-%s.html' % page
        with open(filename, 'wb') as f:
            f.write(response.body)
        self.log('Saved file %s' % filename)
This is my code. When I run it from the command prompt, the error shown is a KeyError saying the spider is not found.
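For reference, here is a minimal sketch of running the spider programmatically instead of through the command prompt; it assumes the class above is saved in a file called quotes_spider.py (that module name is an assumption):

    from scrapy.crawler import CrawlerProcess

    from quotes_spider import QuotesSpider  # assumed module name for the file above

    process = CrawlerProcess(settings={"LOG_LEVEL": "INFO"})
    process.crawl(QuotesSpider)  # pass the spider class itself, not its name string
    process.start()  # blocks until the crawl finishes

Passing the class directly to crawl() skips the by-name spider lookup that `scrapy crawl` performs, so it runs the spider class on its own.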