Update routes

Overall route: v20250723
南风 route: v20250724
潇洒 route: v07.24.3
Author: Liu
Date: 2025-07-24 14:00:50 +08:00
parent 4b7f02622a
commit 993f56c921
34 changed files with 15935 additions and 2860 deletions

潇洒/py/哔哩直播.py Normal file

@@ -0,0 +1,314 @@
# coding=utf-8
# !/usr/bin/python
"""
Author: 丢丢喵 🚓. All content is collected from the internet and is provided for study and exchange only. Copyright belongs to the original creators; if your rights have been infringed, please notify the author and the infringing content will be removed promptly.
====================Diudiumiao====================
"""
from Crypto.Util.Padding import unpad
from Crypto.Util.Padding import pad
from urllib.parse import unquote
from Crypto.Cipher import ARC4
from urllib.parse import quote
from base.spider import Spider
from Crypto.Cipher import AES
from datetime import datetime
from bs4 import BeautifulSoup
from base64 import b64decode
import urllib.request
import urllib.parse
import datetime
import binascii
import requests
import base64
import json
import time
import sys
import re
import os
sys.path.append('..')
xurl = "https://search.bilibili.com"
xurl1 = "https://api.live.bilibili.com"
headerx = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0'
}


class Spider(Spider):
    global xurl
    global xurl1
    global headerx

    def getName(self):
        return "首页"

    def init(self, extend):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def extract_middle_text(self, text, start_str, end_str, pl, start_index1: str = '', end_index2: str = ''):
        if pl == 3:
            plx = []
            while True:
                start_index = text.find(start_str)
                if start_index == -1:
                    break
                end_index = text.find(end_str, start_index + len(start_str))
                if end_index == -1:
                    break
                middle_text = text[start_index + len(start_str):end_index]
                plx.append(middle_text)
                text = text.replace(start_str + middle_text + end_str, '')
            if len(plx) > 0:
                purl = ''
                for i in range(len(plx)):
                    matches = re.findall(start_index1, plx[i])
                    output = ""
                    for match in matches:
                        match3 = re.search(r'(?:^|[^0-9])(\d+)(?:[^0-9]|$)', match[1])
                        if match3:
                            number = match3.group(1)
                        else:
                            number = 0
                        if 'http' not in match[0]:
                            output += f"#{match[1]}${number}{xurl}{match[0]}"
                        else:
                            output += f"#{match[1]}${number}{match[0]}"
                    output = output[1:]
                    purl = purl + output + "$$$"
                purl = purl[:-3]
                return purl
            else:
                return ""
        else:
            start_index = text.find(start_str)
            if start_index == -1:
                return ""
            end_index = text.find(end_str, start_index + len(start_str))
            if end_index == -1:
                return ""
            if pl == 0:
                middle_text = text[start_index + len(start_str):end_index]
                return middle_text.replace("\\", "")
            if pl == 1:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    jg = ' '.join(matches)
                    return jg
            if pl == 2:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    new_list = [f'{item}' for item in matches]
                    jg = '$$$'.join(new_list)
                    return jg

    def homeContent(self, filter):
        result = {}
        result = {"class": [{"type_id": "", "type_name": "舞蹈"},
                            {"type_id": "音乐", "type_name": "音乐"},
                            {"type_id": "手游", "type_name": "手游"},
                            {"type_id": "网游", "type_name": "网游"},
                            {"type_id": "单机游戏", "type_name": "单机游戏"},
                            {"type_id": "虚拟主播", "type_name": "虚拟主播"},
                            {"type_id": "电台", "type_name": "电台"},
                            {"type_id": "体育", "type_name": "体育"},
                            {"type_id": "聊天", "type_name": "聊天"},
                            {"type_id": "娱乐", "type_name": "娱乐"},
                            {"type_id": "电影", "type_name": "影视"},
                            {"type_id": "新闻", "type_name": "新闻"}]
                  }
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, cid, pg, filter, ext):
        result = {}
        videos = []
        if pg:
            page = int(pg)
        else:
            page = 1
        url = f'{xurl}/live?keyword={cid}&page={str(page)}'
        detail = requests.get(url=url, headers=headerx)
        detail.encoding = "utf-8"
        res = detail.text
        doc = BeautifulSoup(res, "lxml")
        soups = doc.find_all('div', class_="video-list-item")
        for vod in soups:
            names = vod.find('h3', class_="bili-live-card__info--tit")
            name = names.text.strip().replace('直播中', '')
            id = names.find('a')['href']
            id = self.extract_middle_text(id, 'bilibili.com/', '?', 0)
            pic = vod.find('img')['src']
            if 'http' not in pic:
                pic = "https:" + pic
            remarks = vod.find('a', class_="bili-live-card__info--uname")
            remark = remarks.text.strip()
            video = {
                "vod_id": id,
                "vod_name": name,
                "vod_pic": pic,
                "vod_remarks": remark
            }
            videos.append(video)
        result = {'list': videos}
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        did = ids[0]
        result = {}
        videos = []
        xianlu = ''
        bofang = ''
        url = f'{xurl1}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={did}&platform=web&protocol=0,1&format=0,1,2&codec=0,1'
        detail = requests.get(url=url, headers=headerx)
        detail.encoding = "utf-8"
        data = detail.json()
        content = '欢迎观看哔哩直播'
        setup = data['data']['playurl_info']['playurl']['stream']
        nam = 0
        for vod in setup:
            try:
                host = vod['format'][nam]['codec'][0]['url_info'][1]['host']
                base = vod['format'][nam]['codec'][0]['base_url']
                extra = vod['format'][nam]['codec'][0]['url_info'][1]['extra']
            except (KeyError, IndexError):
                continue
            id = host + base + extra
            nam = nam + 1
            namc = f"{nam}号线路"
            bofang = bofang + namc + '$' + id + '#'
        bofang = bofang[:-1]
        xianlu = '哔哩专线'
        videos.append({
            "vod_id": did,
            "vod_content": content,
            "vod_play_from": xianlu,
            "vod_play_url": bofang
        })
        result['list'] = videos
        return result

    def playerContent(self, flag, id, vipFlags):
        result = {}
        result["parse"] = 0
        result["playUrl"] = ''
        result["url"] = id
        result["header"] = headerx
        return result

    def searchContentPage(self, key, quick, pg):
        result = {}
        videos = []
        if pg:
            page = int(pg)
        else:
            page = 1
        url = f'{xurl}/live?keyword={key}&page={str(page)}'
        detail = requests.get(url=url, headers=headerx)
        detail.encoding = "utf-8"
        res = detail.text
        doc = BeautifulSoup(res, "lxml")
        soups = doc.find_all('div', class_="video-list-item")
        for vod in soups:
            names = vod.find('h3', class_="bili-live-card__info--tit")
            name = names.text.strip().replace('直播中', '')
            id = names.find('a')['href']
            id = self.extract_middle_text(id, 'bilibili.com/', '?', 0)
            pic = vod.find('img')['src']
            if 'http' not in pic:
                pic = "https:" + pic
            remarks = vod.find('a', class_="bili-live-card__info--uname")
            remark = remarks.text.strip()
            video = {
                "vod_id": id,
                "vod_name": name,
                "vod_pic": pic,
                "vod_remarks": remark
            }
            videos.append(video)
        result['list'] = videos
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def searchContent(self, key, quick, pg="1"):
        return self.searchContentPage(key, quick, '1')

    def localProxy(self, params):
        if params['type'] == "m3u8":
            return self.proxyM3u8(params)
        elif params['type'] == "media":
            return self.proxyMedia(params)
        elif params['type'] == "ts":
            return self.proxyTs(params)
        return None


@@ -146,7 +146,6 @@ class Spider(Spider):
        result = {}
        cateManual = {
            "虎牙": "huya",
            "哔哩": "bili",
            "抖音": "douyin",
            "斗鱼": "douyu",
            "网易": "wangyi"

潇洒/py/零度影视.py Normal file

@@ -0,0 +1,220 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import json
import random
import sys
from base64 import b64encode, b64decode
from concurrent.futures import ThreadPoolExecutor
sys.path.append('..')
from base.spider import Spider

class Spider(Spider):

    def init(self, extend=""):
        did = self.getdid()
        self.headers.update({'deviceId': did})
        token = self.gettk()
        self.headers.update({'token': token})

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    host = 'http://ldys.sq1005.top'

    headers = {
        'User-Agent': 'okhttp/4.12.0',
        'client': 'app',
        'deviceType': 'Android'
    }

    def homeContent(self, filter):
        data = self.post(f"{self.host}/api/v1/app/screen/screenType", headers=self.headers).json()
        result = {}
        cate = {
            "类型": "classify",
            "地区": "region",
            "年份": "year"
        }
        sort = {
            'key': 'sreecnTypeEnum',
            'name': '排序',
            'value': [{'n': '最新', 'v': 'NEWEST'}, {'n': '人气', 'v': 'POPULARITY'}, {'n': '评分', 'v': 'COLLECT'}, {'n': '热搜', 'v': 'HOT'}]
        }
        classes = []
        filters = {}
        for k in data['data']:
            classes.append({
                'type_name': k['name'],
                'type_id': k['id']
            })
            filters[k['id']] = []
            for v in k['children']:
                filters[k['id']].append({
                    'name': v['name'],
                    'key': cate[v['name']],
                    'value': [{'n': i['name'], 'v': i['name']} for i in v['children']]
                })
            filters[k['id']].append(sort)
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        jdata = {"condition": 64, "pageNum": 1, "pageSize": 40}
        data = self.post(f"{self.host}/api/v1/app/recommend/recommendSubList", headers=self.headers, json=jdata).json()
        return {'list': self.getlist(data['data']['records'])}

    def categoryContent(self, tid, pg, filter, extend):
        jdata = {
            'condition': {
                'sreecnTypeEnum': 'NEWEST',
                'typeId': tid,
            },
            'pageNum': int(pg),
            'pageSize': 40,
        }
        jdata['condition'].update(extend)
        data = self.post(f"{self.host}/api/v1/app/screen/screenMovie", headers=self.headers, json=jdata).json()
        result = {}
        result['list'] = self.getlist(data['data']['records'])
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        ids = ids[0].split('@@')
        jdata = {"id": int(ids[0]), "typeId": ids[-1]}
        v = self.post(f"{self.host}/api/v1/app/play/movieDesc", headers=self.headers, json=jdata).json()
        v = v['data']
        vod = {
            'type_name': v.get('classify'),
            'vod_year': v.get('year'),
            'vod_area': v.get('area'),
            'vod_actor': v.get('star'),
            'vod_director': v.get('director'),
            'vod_content': v.get('introduce'),
            'vod_play_from': '',
            'vod_play_url': ''
        }
        c = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
        l = c['data']['moviePlayerList']
        n = {str(i['id']): i['moviePlayerName'] for i in l}
        m = jdata.copy()
        m.update({'playerId': str(l[0]['id'])})
        pd = self.getv(m, c['data']['episodeList'])
        if len(l) > 1:
            with ThreadPoolExecutor(max_workers=len(l) - 1) as executor:
                future_to_player = {executor.submit(self.getd, jdata, player): player for player in l[1:]}
                for future in future_to_player:
                    try:
                        o, p = future.result()
                        pd.update(self.getv(o, p))
                    except Exception as e:
                        print(f"请求失败: {e}")
        w, e = [], []
        for i, x in pd.items():
            if x:
                w.append(n[i])
                e.append(x)
        vod['vod_play_from'] = '$$$'.join(w)
        vod['vod_play_url'] = '$$$'.join(e)
        return {'list': [vod]}

    def searchContent(self, key, quick, pg="1"):
        jdata = {
            "condition": {
                "value": key
            },
            "pageNum": int(pg),
            "pageSize": 40
        }
        data = self.post(f"{self.host}/api/v1/app/search/searchMovie", headers=self.headers, json=jdata).json()
        return {'list': self.getlist(data['data']['records']), 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        jdata = json.loads(self.d64(id))
        data = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
        try:
            params = {'playerUrl': data['data']['url'], 'playerId': jdata['playerId']}
            pd = self.fetch(f"{self.host}/api/v1/app/play/analysisMovieUrl", headers=self.headers, params=params).json()
            url, p = pd['data'], 0
        except Exception as e:
            print(f"请求失败: {e}")
            url, p = data['data']['url'], 0
        return {'parse': p, 'url': url, 'header': {'User-Agent': 'okhttp/4.12.0'}}

    def localProxy(self, param):
        pass

    def liveContent(self, url):
        pass

    def gettk(self):
        data = self.fetch(f"{self.host}/api/v1/app/user/visitorInfo", headers=self.headers).json()
        return data['data']['token']

    def getdid(self):
        did = self.getCache('ldid')
        if not did:
            hex_chars = '0123456789abcdef'
            did = ''.join(random.choice(hex_chars) for _ in range(16))
            self.setCache('ldid', did)
        return did

    def getd(self, jdata, player):
        x = jdata.copy()
        x.update({'playerId': str(player['id'])})
        response = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=x).json()
        return x, response['data']['episodeList']

    def getv(self, d, c):
        f = {d['playerId']: ''}
        g = []
        for i in c:
            j = d.copy()
            j.update({'episodeId': str(i['id'])})
            g.append(f"{i['episode']}${self.e64(json.dumps(j))}")
        f[d['playerId']] = '#'.join(g)
        return f

    def getlist(self, data):
        videos = []
        for i in data:
            videos.append({
                'vod_id': f"{i['id']}@@{i['typeId']}",
                'vod_name': i.get('name'),
                'vod_pic': i.get('cover'),
                'vod_year': i.get('year'),
                'vod_remarks': i.get('totalEpisode')
            })
        return videos

    def e64(self, text):
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self, encoded_text):
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""