Scrapy Framework: Logging In to Websites

Date: 2022-07-25

This article introduces how to log in to websites with the Scrapy framework. It covers three approaches: reusing browser cookies, and sending a POST login request with either manually or automatically extracted form parameters.

1. Logging in with cookies

import scrapy

class LoginSpider(scrapy.Spider):
    name = 'login'
    allowed_domains = ['xxx.com']
    start_urls = ['https://www.xxx.com/xx/']

    # Cookies copied from a logged-in browser session; scrapy.Request expects
    # a dict (or a list of dicts), not a string. Name and value are placeholders.
    cookies = {"sessionid": "xxx"}

    def start_requests(self):
        # Attach the cookies to every start request so the pages are fetched
        # as the logged-in user
        for url in self.start_urls:
            yield scrapy.Request(url, cookies=self.cookies, callback=self.parse)

    def parse(self, response):
        # Dump the page so you can check whether the logged-in state was kept
        with open("01login.html", "wb") as f:
            f.write(response.body)
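
Scrapy wants the cookies as a dict, but what you copy from the browser's developer tools is usually the raw Cookie header string. A minimal helper sketch for that conversion (the helper name and the cookie values below are placeholders, not part of Scrapy):

def cookie_string_to_dict(raw_cookies):
    # Split a raw "Cookie" request-header string ("a=1; b=2") into a dict
    cookies = {}
    for pair in raw_cookies.split(";"):
        if "=" in pair:
            name, _, value = pair.strip().partition("=")
            cookies[name] = value
    return cookies

# Example with placeholder values:
cookies = cookie_string_to_dict("sessionid=xxx; csrftoken=xxx")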

2. Logging in with a POST request, manually extracting the login parameters from the page

import scrapy

class LoginSpider(scrapy.Spider):
    name = 'login_code'
    allowed_domains = ['xxx.com']

    # 1. Start from the login page so its hidden form fields can be read
    start_urls = ['https://www.xxx.com/login/']

    def parse(self, response):
        # 2. Log in programmatically
        login_url = 'https://www.xxx.com/login'

        # Hidden fields such as formhash/backurl are pulled out of the login
        # page by hand with XPath
        formdata = {
            "username": "xxx",
            "pwd": "xxx",
            "formhash": response.xpath("//input[@id='formhash']/@value").extract_first(),
            "backurl": response.xpath("//input[@id='backurl']/@value").extract_first()
        }

        # 3. Send the login POST request
        yield scrapy.FormRequest(login_url, formdata=formdata, callback=self.parse_login)

    def parse_login(self, response):
        # 4. Visit a members-only page; the session cookie set by the login
        # response is re-sent automatically
        member_url = "https://www.xxx.com/member"
        yield scrapy.Request(member_url, callback=self.parse_member)

    def parse_member(self, response):
        with open("02login.html", 'wb') as f:
            f.write(response.body)
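
If the credentials or the hidden fields are wrong, many sites simply serve the login page again with a 200 status, so the request chain continues silently. It can help to check for a logged-in marker before requesting the member page; a minimal sketch of parse_login with such a check (the "logout" marker is an assumption for illustration, use whatever only appears for logged-in users on your site):

    def parse_login(self, response):
        # 4. Only continue if the login really succeeded; the marker string
        # below is an assumption for illustration
        if "logout" not in response.text:
            self.logger.error("Login appears to have failed: %s", response.url)
            return
        member_url = "https://www.xxx.com/member"
        yield scrapy.Request(member_url, callback=self.parse_member)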

3. Logging in with a POST request, letting Scrapy extract the login parameters automatically

import scrapy

class LoginSpider(scrapy.Spider):
    name = 'login_code2'
    allowed_domains = ['xxx.com']

    # 1. Start from the login page
    start_urls = ['https://www.xxx.com/login/']

    def parse(self, response):
        # 2. Log in programmatically. Only the credentials are needed here:
        # from_response copies the hidden fields from the form located by
        # formxpath and submits to that form's own action URL.
        formdata = {
            "username": "xxx",
            "pwd": "xxx"
        }

        # 3. Send the login POST request, built from the form in the response
        yield scrapy.FormRequest.from_response(
            response,
            formxpath="//*[@id='login_pc']",
            formdata=formdata,
            method="POST",  # force POST even if the form's method attribute is GET
            callback=self.parse_login
        )

    def parse_login(self, response):
        # 4. Visit a members-only page
        member_url = "https://www.xxx.com/member"
        yield scrapy.Request(member_url, callback=self.parse_member)

    def parse_member(self, response):
        with open("03login.html", 'wb') as f:
            f.write(response.body)
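
All three approaches rely on Scrapy's built-in CookiesMiddleware carrying the session cookie from the login response over to the follow-up requests, so cookie handling must stay enabled (it is by default). To watch the Cookie / Set-Cookie headers while debugging a login, you can turn on COOKIES_DEBUG in settings.py:

# settings.py
COOKIES_ENABLED = True   # default; the login session lives in cookies
COOKIES_DEBUG = True     # log every Cookie / Set-Cookie header for debugging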