scrapy中的request

scrapy中的request
初始化参数
class scrapy.http.Request(
    url [,
    callback,
    method='GET',
    headers,
    body,
    cookies,
    meta,
    encoding='utf-8',
    priority=0,
    dont_filter=False,
    errback ] )

1. 生成Request的方法
def parse_page1(self, response):
    return scrapy.Request("http://www.example.com/some_page.html",
                          callback=self.parse_page2)

def parse_page2(self, response):
    # this would log http://www.example.com/some_page.html
    self.logger.info("Visited %s", response.url)

2. 通过Request传递数据的方法

def parse_page1(self, response):
    item = MyItem()
    item['main_url'] = response.url
    request = scrapy.Request("http://www.example.com/some_page.html",
                             callback=self.parse_page2)
    request.meta['item'] = item
    yield request

def parse_page2(self, response):
    item = response.meta['item']
    item['other_url'] = response.url
    yield item

3. Request.meta中的特殊关键字

4. 主要子类FormRequest,用于登录
return [FormRequest(url="http://www.example.com/post/action",
                    formdata={'name': 'John Doe', 'age': ''},
                    callback=self.after_post)]

更详细的登录的例子
import scrapy

class LoginSpider(scrapy.Spider):
    name = 'example.com'
    start_urls = ['http://www.example.com/users/login.php']

    def parse(self, response):
        return scrapy.FormRequest.from_response(
            response,
            formdata={'username': 'john', 'password': 'secret'},
            callback=self.after_login
        )

    def after_login(self, response):
        # check login succeed before going on
        if "authentication failed" in response.body:
            self.logger.error("Login failed")
            return

        # continue scraping with authenticated session...