I'm feeling way too lazy to write up the whole process, so I'll just paste the code. I've tried to put a comment on every line, but I really, really cannot bring myself to work right now, aaaaahhhh.
If anything is confusing, tell me in the comments and we can puzzle it out together.
The spider file
import execjs
import scrapy
import json
import time
import requests
from lxml.etree import HTML
from bs4 import BeautifulSoup
from ..items import VideoUrlSpiderItem
class Music163Spider(scrapy.Spider):
    name = 'music163'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Search these keywords and crawl every matching song, plus the songs on each result's album
        # (I'm really not a Xue Zhiqian fan; "丑八怪" was just the first song that popped into my head)
        self.keywords = ['丑八怪', '薛之谦']
        # Read the local JS file that builds the encrypted request parameters
        with open(r'core1.js', 'r', encoding='utf-8') as f:
            self.js_code = f.read()
    def start_requests(self):
        for keyword in self.keywords:
            # The search POST endpoint
            url = 'http://music.163.com/weapi/cloudsearch/get/web?csrf_token='
            # Run the JS to produce the two encrypted form fields the endpoint expects
            js_params = self.get_params_js(keyword)
            data = {
                'params': js_params['encText'],
                'encSecKey': js_params['encSecKey'],
            }
            # Send the POST request
            yield scrapy.FormRequest(url, formdata=data, callback=self.parse, dont_filter=True)
    def parse(self, response):
        content = json.loads(response.text)
        song_list = content['result']['songs']
        # The singles section of the search results
        for song in song_list:
            item = VideoUrlSpiderItem()
            item['title'] = song['name']
            song_id = song['id']
            item['author'] = song['ar'][0]['name']
            item['video_url'] = 'https://music.163.com/song?id={}'.format(song_id)
            download_url = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(song_id)
            # This is not the real download URL yet: requesting it triggers a redirect,
            # and what we actually need is the URL we get redirected to
            item['download_url'] = download_url
            # Convert the millisecond timestamp into a standard formatted time
            publish_time = int(song['publishTime']) // 1000
            time_array = time.localtime(publish_time)
            item['publish_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time_array)
            yield scrapy.Request(download_url, callback=self.parse_detail, meta={'item': item}, dont_filter=True)
        # The albums attached to the search results
        for song in song_list:
            album_id = song['al']['id']  # album id
            url = 'https://music.163.com/album?id={}'.format(album_id)  # album url
            yield scrapy.Request(url, callback=self.parse_album, dont_filter=True)
    def parse_album(self, response):
        s = BeautifulSoup(response.text, 'lxml')
        main = s.find('ul', {'class': 'f-hide'})
        for music in main.find_all('a'):
            item = VideoUrlSpiderItem()
            item['video_url'] = 'https://music.163.com' + music['href']
            download_url = 'http://music.163.com/song/media/outer/url' + music['href'][5:] + '.mp3'
            item['download_url'] = download_url  # same caveat as above: this URL still redirects
            # Fetch the song page and read the artist from its og:music:artist meta tag
            source = requests.get(item['video_url']).text
            doc = HTML(source)
            item['author'] = doc.xpath('//meta[@property="og:music:artist"]/@content')[0]
            item['title'] = music.text
            yield scrapy.Request(download_url, callback=self.parse_detail, meta={'item': item})
    def get_params_js(self, keyword):
        """
        Run the local JS to build the encrypted request parameters.
        :param keyword: search keyword
        :return: dict with 'encText' and 'encSecKey'
        """
        # I found these values by setting breakpoints in the browser's debugger. I wrote this
        # two or three months ago and have forgotten the details; if you're curious, open the
        # dev tools yourself, set a breakpoint, and take a look.
        p2 = '0******1'
        p3 = '00e0b509*********************************************************2b8e7'
        p4 = '0****************'
        js_params = execjs.compile(self.js_code).call(
            'd',
            '{"hlpretag":"","hlposttag":"","s":"' + keyword + '","type":"1","offset":"0","total":"true","limit":"30","csrf_token":""}',
            p2, p3, p4)
        return js_params
    def parse_detail(self, response):
        item = response.meta['item']
        # By the time we get here, Scrapy's redirect middleware has already followed the redirect,
        # so response.url is the final URL and can be downloaded directly. I won't show the
        # download step: I dedup through redis, so items go straight into redis, and a dedicated
        # download spider reads them back out and does the actual downloading.
        item['download_url'] = response.url
        return item
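Oh, and items.py, since the spider imports VideoUrlSpiderItem from it: the spider only ever touches five fields, so it's basically just this (a minimal sketch, with only the field names used above):

import scrapy

class VideoUrlSpiderItem(scrapy.Item):
    # Only the fields the spider actually populates; extend as needed
    title = scrapy.Field()
    author = scrapy.Field()
    video_url = scrapy.Field()
    download_url = scrapy.Field()
    publish_time = scrapy.Field()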
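And since I mentioned the redis dedup in parse_detail: I won't paste my real pipeline, but a minimal sketch of the idea (assuming a local redis instance and redis-py; the key names here are made up) looks like this:

import json
import redis

class RedisItemPipeline:
    def open_spider(self, spider):
        self.r = redis.Redis(host='localhost', port=6379, db=0)

    def process_item(self, item, spider):
        # sadd returns 0 when the url is already in the set, which gives dedup for free
        if self.r.sadd('music163:seen', item['download_url']):
            self.r.lpush('music163:items', json.dumps(dict(item), ensure_ascii=False))
        return item

The download spider then just pops entries off the 'music163:items' list and fetches them.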
The JS code
I've put the JS code behind this link; it only costs one download credit: https://download.csdn.net/download/Candance_star/13130571
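If you don't feel like grabbing the file: as far as I can tell, scripts like this one just wrap NetEase's weapi encryption, which is well documented around the web. The payload gets AES-128-CBC encrypted twice, first with a fixed nonce and then with a random 16-character key, and that random key is RSA-encrypted into encSecKey. A rough pure-Python equivalent, purely as a sketch (assuming pycryptodome; the three masked constants above would be the RSA exponent, the RSA modulus, and the AES nonce, in that order):

import base64
import codecs
import json
import os

from Crypto.Cipher import AES

IV = b'0102030405060708'  # fixed IV used by the web client

def aes_cbc_b64(text, key):
    # PKCS#7 pad, AES-128-CBC encrypt, then base64 encode
    pad = 16 - len(text) % 16
    text = text + bytes([pad]) * pad
    return base64.b64encode(AES.new(key, AES.MODE_CBC, IV).encrypt(text))

def rsa_encrypt(text, exponent_hex, modulus_hex):
    # textbook RSA with no padding, applied to the reversed key bytes, as the JS does
    n = int(codecs.encode(text[::-1], 'hex'), 16)
    return format(pow(n, int(exponent_hex, 16), int(modulus_hex, 16)), 'x').zfill(256)

def encrypt_params(payload, exponent_hex, modulus_hex, nonce):
    sec_key = base64.b64encode(os.urandom(12))[:16]  # random 16-character second AES key
    enc_text = aes_cbc_b64(aes_cbc_b64(json.dumps(payload).encode(), nonce.encode()), sec_key)
    return {
        'encText': enc_text.decode(),
        'encSecKey': rsa_encrypt(sec_key, exponent_hex, modulus_hex),
    }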
If this post can't make it through review either, I'm never posting on CSDN again......