Python Web Crawler

2021/10/14 22:15:44

This article walks through a small Python web crawler for job listings on liepin.com. It should be a useful reference for anyone solving a similar programming problem; if that's you, follow along!

import re
import time
import random
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from scrapy.selector import Selector

# retry failed connections up to 5 times
requests.adapters.DEFAULT_RETRIES = 5
# Liepin job-search listing page; headId looks like a per-search session id,
# and pubTime=30 appears to restrict results by publication time
url4="https://www.liepin.com/zhaopin/?headId=d635640db4d8b0b807d1bcc053d4918a&pubTime=30"
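If you want to vary the filters, a cleaner approach is to let requests build the query string from a dict; a minimal sketch, with the parameter names taken from the URL above:

params = {
    'headId': 'd635640db4d8b0b807d1bcc053d4918a',  # session id copied from url4
    'pubTime': 30,                                 # publication-time filter copied from url4
}
# requests URL-encodes the dict into ?headId=...&pubTime=30
resp = requests.get("https://www.liepin.com/zhaopin/", params=params)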


file="d:\\data\\"+datetime.now().strftime('%Y%m%d%H%M%S')+".txt"
file=datetime.now().strftime('%Y%m%d%H%M%S')+".txt"
f = open(file, 'w')#新建文件,能写入;如果文件存在则覆盖
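One caveat on this module-level handle: if the script dies mid-crawl, the file may never be closed. A with-block (a minimal sketch, not the original's structure) closes it automatically:

with open(file, 'w') as out:
    out.write("one record per line\n")  # handle is closed on exit, even on errors

The script keeps a global f instead so every helper function can write to it, at the cost of having to close it manually at the end.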
# Pool of User-Agent strings to rotate between requests, so the crawler
# does not present a single, easily blocked identity
user_agent = [
    "Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
    "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52"
]

def get_html(url):
    # Fetch url once with a random User-Agent; return the page text,
    # or None if the request failed.
    try:
        aaop=random.choice(user_agent)
        header={
        'User-Agent': aaop,  # randomly chosen browser identity
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',  # content types the client accepts
        'Accept-Language': 'en-US,en;q=0.5',  # languages the client accepts
        }
        print(aaop)
        ht=requests.get(url,headers=header,timeout=20)  # timeout so a hung connection cannot block forever
        ht.raise_for_status()  # raise on HTTP error status codes
        ht.encoding='utf-8'    # the site serves UTF-8
        return ht.text
    except Exception as e:
        print("error:",e)
def get_htmla(url):
    # Like get_html, but keep retrying (with a fresh random
    # User-Agent each time) until the request succeeds.
    while True:
        try:
            aaop=random.choice(user_agent)
            header={
            'User-Agent': aaop,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            }
            print(aaop)
            ht=requests.get(url,headers=header,timeout=20)
            ht.raise_for_status()
            ht.encoding='utf-8'
            return ht.text
        except Exception as e:
            print("error:",e)
def jiansuo(url):
    # Extract the text of each entry inside <div class="sk"> blocks
    # and write it to the output file, one line per record.
    soup=BeautifulSoup(get_htmla(url),"html.parser")
    aae=0
    for aa in soup.find_all('div'):  # walk every <div>
        if aa.attrs.get('class')==["sk"]:  # each school-info <div> carries the attribute class="sk"
            for bb in aa.children:
                if bb.string is not None:
                    aae=aae+1
                    print(aae)
                    # drop any characters GBK cannot represent before writing
                    x=bb.string.encode('gbk','ignore').decode('gbk')
                    f.write(x+"\n")  # one record per line, newline-terminated
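The class test inside jiansuo can also be pushed into BeautifulSoup itself, which skips divs without a class attribute up front; a minimal equivalent sketch:

soup = BeautifulSoup(get_htmla(url4), "html.parser")
for aa in soup.find_all('div', class_='sk'):  # only <div class="sk"> blocks
    for bb in aa.children:
        if bb.string is not None:
            print(bb.string)

Note that class_='sk' matches any div whose class list contains sk, which is equivalent to the exact comparison above as long as those divs carry only that one class.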
def jiansuoa(url):
    # Print the children of every <div class="job-detail-box">.
    soup=BeautifulSoup(get_htmla(url),"html.parser")
    for aa in soup.find_all('div'):  # walk every <div>
        for bb in aa.children:
            try:
                # text nodes have no attrs and divs may lack a class,
                # so guard the lookup with try/except
                if bb.attrs["class"][0]=='job-detail-box':
                    for cc in bb.children:
                        print(cc)
            except (AttributeError, KeyError):
                continue
def jiansuoaa(url):
    # Print the text of every <div> inside the job-card links,
    # which the listing page tags with data-nick="job-detail-job-info".
    soup=BeautifulSoup(get_htmla(url),"html.parser")
    aae=0
    for aa in soup.find_all('a'):  # walk every <a> tag
        try:
            if aa.attrs["data-nick"]=='job-detail-job-info':
                for bb in aa.find_all('div'):
                    if bb.string is not None:
                        aae=aae+1
                        print(bb.string,aae)
        except KeyError:  # links without a data-nick attribute
            continue
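The three jiansuo* helpers are alternative extraction passes over the same listing markup; for example:

jiansuoaa(url4)  # print the text of every job card found on the listing page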
        
# Main driver: fetch the listing page, pull out the <ul> of job cards
# with XPath, then count the cards by matching their wrapper divs.
r=requests.get(url4)
r.encoding='utf-8'
body=r.text
tx=Selector(text=body).xpath('/html/body/div[1]/div/section[1]/div/ul').extract()
tx1=re.findall('div class="job-detail-box"',str(tx))
aae=0
for aa in tx1:
    aae=aae+1
    print(aa,aae)
f.close()  # close the output file once the crawl finishes
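Since the loop above only counts the regex matches, len() gives the same number directly:

print(len(tx1), "job cards found")  # equivalent to the counting loop above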

That concludes this article on the Python web crawler. We hope the articles we recommend are helpful, and we hope you will continue to support 为之网!

