Scraping Toutiao back-end data with Python: a crawler implementation with full code

Course link

The instructor's WeChat official-account article: "Toutiao data scraping and persistence (complete code, with proxy IPs and user agents)" (mp.weixin.qq.com)

Course code

utils.py: fetching and persisting user-agent data

A crawler needs to set the user-agent on the requests it sends. The fake-useragent package (`from fake_useragent import UserAgent`) can supply one, but it fetches its data over the network whenever the user-agent list is refreshed, which is not ideal. A better approach is to download that data once and keep it in a local JSON file for later use.
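For comparison, this is roughly how fake-useragent is used on its own (a minimal sketch; constructing UserAgent may trigger a network download of the browser list, which is exactly the overhead the local JSON file avoids):

from fake_useragent import UserAgent

ua = UserAgent()     # may download/refresh the user-agent database
print(ua.random)     # a random user-agent string
print(ua.chrome)     # a random Chrome user-agent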

How do you find out where fake-useragent pulls its data from? (The original post shows a screenshot here.)
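In place of the screenshot, a quick way to inspect the data source is to fetch the URL directly and look at its top-level keys. The key names in the comments below are what version 0.1.11 is expected to return, so verify them against your own output:

import requests

resp = requests.get("https://fake-useragent.herokuapp.com/browsers/0.1.11")
data = resp.json()
print(list(data.keys()))        # expected: something like ['randomize', 'browsers']
print(list(data["browsers"]))   # browser names such as chrome, opera, firefox, ...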

The code for utils.py:

"""

这个工具的目的是把fake_useragent的user_agent下载下来,存储文件使用

from fake_useragent import UserAgent

https://fake-useragent.herokuapp.com/browsers/0.1.11

"""

import requests

import json

import random

"""

json的方法中 dump,dumps,load,loads的区别

dump/load,是操作文件的

dumps/loads,是处理json和字符串直接相互转换的

作业:拿到标签并存储起来

"""

browser_info_file = "browser_info.json"

user_agent_url = "https://fake-useragent.herokuapp.com/browsers/0.1.11"

def write_browser_info_to_file():

my_user_agent = requests.get(user_agent_url)

with open(browser_info_file, "w") as f:

# 写进去就是JSON,方便用json.load读取

json.dump(my_user_agent.text, f)

# f.write 的写入方式,是个字符串,json.load就会有问题

# f.write(my_user_agent.text)

def get_random_browser():

file = open(browser_info_file, 'r')

#

agent_json = json.load(file)

#

agent_json = json.loads(agent_json)

browsers = agent_json["browsers"]

# 随机出来一个浏览器类型

i = random.randint(0, len(browsers) - 1)

if i == 0:

browser_name = "chrome"

elif i == 1:

browser_name = "opera"

elif i == 2:

browser_name = "internetexplorer"

else:

browser_name = "safari"

final_browser = browsers[browser_name][random.randint(0, len(browsers[browser_name]))]

return final_browser

if __name__ == '__main__':

write_browser_info_to_file()

print(get_random_browser())
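The module docstring above calls out the dump/dumps vs load/loads distinction, and it is also why get_random_browser needs both json.load and json.loads: write_browser_info_to_file json-dumps the response text (a string), so reading the file back yields a string that still has to be parsed. A small standalone sketch of the four functions:

import json

info = {"browser": "chrome", "version": 91}

# dumps/loads: convert between Python objects and JSON strings in memory
s = json.dumps(info)
assert json.loads(s) == info

# dump/load: the same conversion, but writing to / reading from a file object
with open("demo.json", "w") as f:
    json.dump(info, f)
with open("demo.json") as f:
    assert json.load(f) == info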

toutiao_spider.py: fetching the tags of hot articles and storing them

import requests
import time
import json
import pandas as pd
import re

from part1.utils import get_random_browser


# Build the request URL, headers and proxies for the HTTP request
def get_request_url_and_headers():
    user_agent = get_random_browser()
    current_time = int(time.time())
    # Where am I going: the target URL.
    # For this Toutiao endpoint, drop the _signature parameter, otherwise no data comes back.
    request_url = "https://www.toutiao.com/api/pc/feed/?max_behot_time=" \
                  + str(current_time) \
                  + "&category=news_hot&utm_source=toutiao&widen=1&tadrequire=true&as=A1751D55DE389D7&cp" \
                    "=5D5E38E9CDA7BE1"
    # How do I travel ("by bicycle"): the user-agent
    headers = {
        "user-agent": user_agent
    }
    # Where do I come from ("from the Great Tang in the East"): the proxy IP.
    # Searching for "free proxy IP" turns up usable proxies.
    # Note: requests expects the key to be the scheme ("http"/"https"), not "url".
    proxies = {
        "http": "http://114.235.23.172:9000"
    }
    return request_url, headers, proxies


# Send the HTTP request and parse the response; retry if Toutiao reports an error
def get_response_json():
    request_url, headers, proxies = get_request_url_and_headers()
    response = requests.get(request_url, headers=headers, proxies=proxies)
    # print(response.text.encode("utf-8").decode("unicode_escape"))
    response_json = json.loads(response.text)
    if response_json["message"] == "error":
        response_json = get_response_json()
    return response_json


# Fetch the Toutiao hot-news data and append it to a JSON Lines file for later use
def data_to_file():
    data = get_response_json()["data"]
    for i in range(len(data)):
        data_dict = data[i]
        with open("toutiao.json", "a+") as f:
            json.dump(data_dict, f, ensure_ascii=False)
            f.write("\n")
    df = pd.read_json("toutiao.json", lines=True)
    df.to_excel("toutiao.xlsx")


# Fetch each hot article's tags and store them alongside the article data
def get_news_tag():
    df = pd.read_json("toutiao.json", lines=True)
    tags_pure = []
    request_url, headers, proxies = get_request_url_and_headers()
    for i in range(len(df)):
        news_url = "https://www.toutiao.com" + df["source_url"][i]
        # print(news_url)
        response = requests.get(news_url, headers=headers, proxies=proxies)
        reg = r'"name":".*?"}'
        tags = re.findall(reg, response.text)
        tags_pure_row = []
        if len(tags) <= 0:
            # No tags found on this article page
            tags_pure.append([])
            continue
        for j in range(len(tags)):
            tags_pure_row.append(tags[j].replace('"name":"', '').replace('"}', ''))
        tags_pure.append(tags_pure_row)
    df["tags"] = tags_pure
    df.to_excel("toutiao_with_tags.xlsx")


if __name__ == '__main__':
    # print(get_request_url_and_headers())
    # print(get_response_json())
    # data_to_file()
    get_news_tag()
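A side note on the tag extraction: get_news_tag matches the whole '"name":"..."}' fragment and then strips the delimiters with replace. A regex capture group returns only the tag text directly; a minimal sketch (the HTML fragment below is made up for illustration):

import re

# Hypothetical snippet of the embedded JSON on an article page
html = '... "tags": [{"name":"科技"}, {"name":"人工智能"}] ...'

# The capture group keeps only the text between the quotes
tags = re.findall(r'"name":"(.*?)"}', html)
print(tags)  # ['科技', '人工智能']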

Results

Running data_to_file() produces toutiao.json and toutiao.xlsx, and get_news_tag() produces toutiao_with_tags.xlsx; the original post shows screenshots of these files here.
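To reproduce the run end to end, a suggested order inferred from the code above (assuming toutiao_spider.py lives in the same part1 package as utils.py):

# Suggested run order, inferred from the code above
from part1.utils import write_browser_info_to_file
from part1.toutiao_spider import data_to_file, get_news_tag

write_browser_info_to_file()   # creates browser_info.json
data_to_file()                 # builds toutiao.json and toutiao.xlsx
get_news_tag()                 # builds toutiao_with_tags.xlsx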


Copyright notice: this is an original article by weixin_42519126, released under the CC 4.0 BY-SA license. Please include a link to the original source and this notice when reposting.