Scraping download links from a movie site with Python
https://github.com/GriffinLewis2001/Python_movie_links_scraper
Sample run

import requests, re
from requests.cookies import RequestsCookieJar
from fake_useragent import UserAgent   # imported by the script but not used below
import os, pickle, threading, time     # likewise unused in this version
import concurrent.futures
from goto import with_goto             # from the goto-statement package

Main scraper code
def get_content_url_name(url):
    send_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8'}
    cookie_jar = RequestsCookieJar()
    cookie_jar.set('mttp', '9740fe449238', domain='www.yikedy.co')
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # Capture the href of each result link; adjust the pattern if the site's markup differs
    reg = re.compile(r'<a href="(.*?)"')
    url_name_list = reg.findall(content)
    return url_name_list

def get_content(url):
    send_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8'}
    cookie_jar = RequestsCookieJar()
    cookie_jar.set('mttp', '9740fe449238', domain='www.yikedy.co')
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    return response.text

def search_durl(url):
    content = get_content(url)
    # The detail page embeds a token as {'decriptParam':'...'}; the key is written in hex escapes
    reg = re.compile(r"{'\x64\x65\x63\x72\x69\x70\x74\x50\x61\x72\x61\x6d':'(.*?)'}")
    index = reg.findall(content)[0]
    # Drop the trailing '.html' and request the download list with the token
    download_url = url[:-5] + r'/downloadList?decriptParam=' + index
    content = get_content(download_url)
    # Capture the download links; adjust the pattern if the site's markup differs
    reg1 = re.compile(r'title=".*?" href="(.*?)"')
    download_list = reg1.findall(content)
    return download_list

def get_page(url):
    send_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8'}
    cookie_jar = RequestsCookieJar()
    cookie_jar.set('mttp', '9740fe449238', domain='www.yikedy.co')
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    # Each search result is captured as a (url, title) pair
    reg = re.compile(r'<a target="_blank" href="(.*?)".*?>(.*?)</a>')
    url_name_list = reg.findall(content)
    return url_name_list

@with_goto
def main():
    print('=========================================================')
    name = input('Enter a title to search (type quit to exit): ')
    if name == 'quit':
        exit()
    url = 'http://www.yikedy.co/search?query=' + name
    dlist = get_page(url)
    print('\n')
    if dlist:
        num = 0
        count = 0
        for i in dlist:
            if name in i[1]:
                print(f'{num} {i[1]}')
                num += 1
            elif num == 0 and count == len(dlist) - 1:
                goto .end
            count += 1
        dest = int(input('\n\nEnter the number of the show (enter 100 to skip this search): '))
        if dest == 100:
            goto .end
        x = 0
        print('\nDownload links:\n')
        for i in dlist:
            if name in i[1]:
                if x == dest:
                    for durl in search_durl(i[0]):
                        print(f'{durl}\n')
                    print('\n')
                    break
                x += 1
    else:
        label .end   # function-wide label targeted by the goto jumps above
        print('Nothing found, or nothing worth watching\n')
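The only non-obvious step above is how search_durl builds the download-list URL: the detail page stores a token under a key whose characters are written as hex escapes, and those escapes simply spell out decriptParam. A minimal sketch of that step, using made-up values for the detail URL and token:

# The hex-escaped key used in the regex decodes to a plain string:
key = '\x64\x65\x63\x72\x69\x70\x74\x50\x61\x72\x61\x6d'
print(key)  # decriptParam

# Hypothetical detail-page URL and captured token, for illustration only:
detail_url = 'http://www.yikedy.co/film12345.html'
index = 'abc123'

# url[:-5] drops the trailing '.html' before appending the query string:
download_url = detail_url[:-5] + '/downloadList?decriptParam=' + index
print(download_url)  # http://www.yikedy.co/film12345/downloadList?decriptParam=abc123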
Complete code

import requests, re
from requests.cookies import RequestsCookieJar
from fake_useragent import UserAgent
import os, pickle, threading, time
import concurrent.futures
from goto import with_goto

def get_content_url_name(url):
    send_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8'}
    cookie_jar = RequestsCookieJar()
    cookie_jar.set('mttp', '9740fe449238', domain='www.yikedy.co')
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    reg = re.compile(r'<a href="(.*?)"')
    url_name_list = reg.findall(content)
    return url_name_list

def get_content(url):
    send_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8'}
    cookie_jar = RequestsCookieJar()
    cookie_jar.set('mttp', '9740fe449238', domain='www.yikedy.co')
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    return response.text

def search_durl(url):
    content = get_content(url)
    reg = re.compile(r"{'\x64\x65\x63\x72\x69\x70\x74\x50\x61\x72\x61\x6d':'(.*?)'}")
    index = reg.findall(content)[0]
    download_url = url[:-5] + r'/downloadList?decriptParam=' + index
    content = get_content(download_url)
    reg1 = re.compile(r'title=".*?" href="(.*?)"')
    download_list = reg1.findall(content)
    return download_list

def get_page(url):
    send_headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        'Connection': 'keep-alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8'}
    cookie_jar = RequestsCookieJar()
    cookie_jar.set('mttp', '9740fe449238', domain='www.yikedy.co')
    response = requests.get(url, headers=send_headers, cookies=cookie_jar)
    response.encoding = 'utf-8'
    content = response.text
    reg = re.compile(r'<a target="_blank" href="(.*?)".*?>(.*?)</a>')
    url_name_list = reg.findall(content)
    return url_name_list

@with_goto
def main():
    print('=========================================================')
    name = input('Enter a title to search (type quit to exit): ')
    if name == 'quit':
        exit()
    url = 'http://www.yikedy.co/search?query=' + name
    dlist = get_page(url)
    print('\n')
    if dlist:
        num = 0
        count = 0
        for i in dlist:
            if name in i[1]:
                print(f'{num} {i[1]}')
                num += 1
            elif num == 0 and count == len(dlist) - 1:
                goto .end
            count += 1
        dest = int(input('\n\nEnter the number of the show (enter 100 to skip this search): '))
        if dest == 100:
            goto .end
        x = 0
        print('\nDownload links:\n')
        for i in dlist:
            if name in i[1]:
                if x == dest:
                    for durl in search_durl(i[0]):
                        print(f'{durl}\n')
                    print('\n')
                    break
                x += 1
    else:
        label .end
        print('Nothing found, or nothing worth watching\n')

print('This software is by CLY.\n\n')
while True:
    main()
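As a design note, the goto-based control flow is the one unusual part of main(); the same behaviour can be expressed with plain early returns. The variant below is only a sketch built on the functions above (main_no_goto is a made-up name, not part of the original script):

def main_no_goto():
    print('=========================================================')
    name = input('Enter a title to search (type quit to exit): ')
    if name == 'quit':
        raise SystemExit
    dlist = get_page('http://www.yikedy.co/search?query=' + name)
    # Keep only the (url, title) pairs whose title contains the query
    matches = [item for item in dlist if name in item[1]]
    if not matches:
        print('Nothing found\n')
        return
    for num, (_, title) in enumerate(matches):
        print(f'{num} {title}')
    dest = int(input('\n\nEnter the number of the show (enter 100 to skip this search): '))
    if dest == 100 or dest >= len(matches):
        return
    print('\nDownload links:\n')
    for durl in search_durl(matches[dest][0]):
        print(f'{durl}\n')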
That wraps up this walkthrough of scraping download links from a movie site with Python.