How to Use Python for Web Scraping

Introduction

This article walks through the basics of using Python for web scraping. People often run into trouble when working through real examples, so the steps below show how to handle those situations one at a time. Read through carefully and you should be able to reproduce the results yourself.

1. Import modules

import re
from bs4 import BeautifulSoup
import requests
import time
import json
import pandas as pd
import numpy as np

2. Check the status code

r = requests.get('https://github.com/explore')
r.status_code
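Before parsing a response it is worth confirming that the request actually succeeded. A minimal sketch using only the standard requests API; the explicit 200 check and the raise_for_status() call are additions for illustration, not part of the original snippet:

r = requests.get('https://github.com/explore')
if r.status_code == 200:   # 200 means the page was fetched successfully
    print('request succeeded')
r.raise_for_status()       # alternatively, raise an exception on any 4xx/5xx response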

3. Scrape Zhihu

# Browser headers and cookies, copied from a logged-in browser session
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'}
cookies = {'cookie': '_zap=3d979dbb-f25b-4014-8770-89045dec48f6; d_c0="APDvML4koQ-PTqFU56egNZNd2wd-eileT3E=|1561292196"; tst=r; _ga=GA1.2.910277933.1582789012; q_c1=9a429b07b08a4ae1afe0a99386626304|1584073146000|1561373910000; _xsrf=bf1c5edf-75bd-4512-8319-02c650b7ad2c; _gid=GA1.2.1983259099.1586575835; l_n_c=1; l_cap_id="NDIxM2M4OWY4N2YwNDRjM2E3ODAxMDdmYmY2NGFiMTQ=|1586663749|ceda775ba80ff485b63943e0baf9968684237435"; r_cap_id="OWY3OGQ1MDJhMjFjNDBiYzk0MDMxMmVlZDIwNzU0NzU=|1586663749|0948d23c731a8fa985614d3ed58edb6405303e99"; cap_id="M2I5NmJkMzRjMjc3NGZjNDhiNzBmNDMyNDQ3NDlmNmE=|1586663749|dacf440ab7ad64214a939974e539f9b86ddb9eac"; n_c=1; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1586585625,1586587735,1586667228,1586667292; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1586667292; SESSIONID=GWBltmMTwz5oFeBTjRm4Akv8pFF6p8Y6qWkgUP4tjp6; JOID=UVkSBEJI6EKgHAipMkwAEWAkvEomDbkAwmJn4mY1kHHPVGfpYMxO3voUDK88UO62JqgwW5Up4hC2kX_KGO9xoKI=; osd=UlEXAU5L4EelEAuhN0kMEmghuUYlBbwFzmFv52M5k3nKUWvqaMlL0vkcCaowU-azI6QzU5As7hO-lHrGG-d0pa4=; capsion_ticket="2|1:0|10:1586667673|14:capsion_ticket|44:YTJkYmIyN2Q4YWI4NDI0Mzk0NjQ1YmIwYmUxZGYyNzY=|b49eb8176314b73e0ade9f19dae4b463fb970c8cbd1e6a07a6a0e535c0ab8ac3"; z_c0="2|1:0|10:1586667694|4:z_c0|92:Mi4xOGc1X0dnQUFBQUFBOE84d3ZpU2hEeVlBQUFCZ0FsVk5ydTVfWHdDazlHMVM1eFU5QjlqamJxWVhvZ2xuWlhTaVJ3|bcd3601ae34951fe72fd3ffa359bcb4acd60462715edcd1e6c4e99776f9543b3"; unlock_ticket="AMCRYboJGhEmAAAAYAJVTbankl4i-Y7Pzkta0e4momKdPG3NRc6GUQ=="; KLBRSID=fb3eda1aa35a9ed9f88f346a7a3ebe83|1586667697|1586660346'}
start_url = 'https://www.zhihu.com/api/v3/feed/topstory/recommend?session_token=c03069ed8f250472b687fd1ee704dd5b&desktop=true&page_number=5&limit=6&action=pull&ad_interval=-1&before_id=23'
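Note that start_url points at Zhihu's recommend-feed API rather than an HTML page, which is one reason json was imported in step 1. A minimal sketch of fetching it, assuming the cookies above are still valid; the 'data' key is an assumption about the response layout and may differ:

resp = requests.get(start_url, headers=headers, cookies=cookies, timeout=5)
if resp.status_code == 200:
    feed = resp.json()                # the endpoint returns JSON, not HTML
    print(len(feed.get('data', [])))  # number of recommended entries (assumed key)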

4. Parse with BeautifulSoup

s = requests.Session()
start_url = 'https://www.zhihu.com/'
html = s.get(url=start_url, headers=headers, cookies=cookies, timeout=5)
soup = BeautifulSoup(html.content)
question = []          # question titles
question_address = []  # question URLs
# each recommended entry on the homepage is rendered as a "TopstoryItem" card
temp1 = soup.find_all('div', class_='Card TopstoryItem TopstoryItem-isRecommend')
for item in temp1:
    temp2 = item.find_all('div', itemprop='zhihu:question')
    # print(temp2)
    if temp2 != []:  # cards without a question (e.g. column articles) are skipped for now
        question_address.append(temp2[0].find('meta', itemprop='url').get('content'))
        question.append(temp2[0].find('meta', itemprop='name').get('content'))
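To check that the parse worked, the two lists can be printed side by side. An illustrative check, not part of the original code:

for name, addr in zip(question, question_address):
    print(name, addr)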

5. Store the information

question_focus_number = []   # follower count per question
question_answer_number = []  # answer count per question
for url in question_address:
    test = s.get(url=url, headers=headers, cookies=cookies, timeout=5)
    soup = BeautifulSoup(test.content)
    info = soup.find_all('div', class_='QuestionPage')[0]
    # print(info)
    focus_number = info.find('meta', itemprop='zhihu:followerCount').get('content')
    answer_number = info.find('meta', itemprop='answerCount').get('content')
    question_focus_number.append(focus_number)
    question_answer_number.append(answer_number)
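Since pandas was imported in step 1, the four lists can be combined into a DataFrame and written to disk. A minimal sketch; the column names and the output filename zhihu_questions.csv are illustrative choices, not from the original:

df = pd.DataFrame({
    'question': question,                # question titles
    'url': question_address,             # question URLs
    'followers': question_focus_number,  # follower counts
    'answers': question_answer_number,   # answer counts
})
df.to_csv('zhihu_questions.csv', index=False, encoding='utf-8-sig')
print(df.head())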

