Update lines

潇洒 line: v09.16
Liu
2025-09-17 15:20:37 +08:00
parent 28b144b0ac
commit 93e29c5826
10 changed files with 66 additions and 1851 deletions


@@ -1,301 +0,0 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import json
import sys
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from urllib.parse import quote
from Crypto.Hash import MD5
import requests
sys.path.append('..')
from base.spider import Spider
class Spider(Spider):
def init(self, extend=""):
self.session = requests.Session()
self.session.headers.update(self.headers)
self.session.cookies.update(self.cookie)
self.get_ctoken()
pass
def getName(self):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
host='https://www.youku.com'
shost='https://search.youku.com'
h5host='https://acs.youku.com'
ihost='https://v.youku.com'
headers = {
'User-Agent': 'Mozilla/5.0 (; Windows 10.0.26100.3194_64 ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Electron/14.2.0 Safari/537.36 Node/14.17.0 YoukuDesktop/9.2.60 UOSYouku (2.0.1)-Electron(UTDID ZYmGMAAAACkDAMU8hbiMmYdd;CHANNEL official;ZREAL 0;BTYPE TM2013;BRAND TIMI;BUILDVER 9.2.60.1001)',
'Referer': f'{host}/'
}
cookie={
"__ysuid": "17416134165380iB",
"__aysid": "1741613416541WbD",
"xlly_s": "1",
"isI18n": "false",
"cna": "bNdVIKmmsHgCAXW9W6yrQ1/s",
"__ayft": "1741672162330",
"__arpvid": "1741672162331FBKgrn-1741672162342",
"__ayscnt": "1",
"__aypstp": "1",
"__ayspstp": "3",
"tfstk": "gZbiib4JpG-6DqW-B98_2rwPuFrd1fTXQt3vHEp4YpJIBA3OgrWcwOi90RTOo9XVQ5tAM5NcK_CP6Ep97K2ce1XDc59v3KXAgGFLyzC11ET2n8U8yoyib67M3xL25e8gS8pbyzC1_ET4e8URWTsSnHv2uh8VTeJBgEuN3d-ELQAWuKWV36PHGpJ2uEWVTxvicLX1ewyUXYSekxMf-CxMEqpnoqVvshvP_pABOwvXjL5wKqeulm52np_zpkfCDGW9Ot4uKFIRwZtP7vP9_gfAr3KEpDWXSIfWRay-DHIc_Z-hAzkD1i5Ooi5LZ0O5YO_1mUc476YMI3R6xzucUnRlNe_zemKdm172xMwr2L7CTgIkbvndhFAVh3_YFV9Ng__52U4SQKIdZZjc4diE4EUxlFrfKmiXbBOHeP72v7sAahuTtWm78hRB1yV3tmg9bBOEhWVnq5KwOBL5."
}
def homeContent(self, filter):
result = {}
categories = ["电视剧", "电影", "综艺", "动漫", "少儿", "纪录片", "文化", "亲子", "教育", "搞笑", "生活",
"体育", "音乐", "游戏"]
classes = [{'type_name': category, 'type_id': category} for category in categories]
filters = {}
self.typeid = {}
with ThreadPoolExecutor(max_workers=len(categories)) as executor:
tasks = {
executor.submit(self.cf, {'type': category}, True): category
for category in categories
}
for future in as_completed(tasks):
try:
category = tasks[future]
session, ft = future.result()
filters[category] = ft
self.typeid[category] = session
except Exception as e:
print(f"处理分类 {tasks[future]} 时出错: {str(e)}")
result['class'] = classes
result['filters'] = filters
return result
def homeVideoContent(self):
try:
vlist = []
params={"ms_codes":"2019061000","params":"{\"debug\":0,\"gray\":0,\"pageNo\":1,\"utdid\":\"ZYmGMAAAACkDAMU8hbiMmYdd\",\"userId\":\"\",\"bizKey\":\"YOUKU_WEB\",\"appPackageKey\":\"com.youku.YouKu\",\"showNodeList\":0,\"reqSubNode\":0,\"nodeKey\":\"WEBHOME\",\"bizContext\":\"{\\\"spmA\\\":\\\"a2hja\\\"}\"}","system_info":"{\"device\":\"pcweb\",\"os\":\"pcweb\",\"ver\":\"1.0.0.0\",\"userAgent\":\"Mozilla/5.0 (; Windows 10.0.26100.3194_64 ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Electron/14.2.0 Safari/537.36 Node/14.17.0 YoukuDesktop/9.2.60 UOSYouku (2.0.1)-Electron(UTDID ZYmGMAAAACkDAMU8hbiMmYdd;CHANNEL official;ZREAL 0;BTYPE TM2013;BRAND TIMI;BUILDVER 9.2.60.1001)\",\"guid\":\"1590141704165YXe\",\"appPackageKey\":\"com.youku.pcweb\",\"young\":0,\"brand\":\"\",\"network\":\"\",\"ouid\":\"\",\"idfa\":\"\",\"scale\":\"\",\"operator\":\"\",\"resolution\":\"\",\"pid\":\"\",\"childGender\":0,\"zx\":0}"}
data=self.getdata(f'{self.h5host}/h5/mtop.youku.columbus.home.query/1.0/',params)
okey=list(data['data'].keys())[0]
for i in data['data'][okey]['data']['nodes'][0]['nodes'][-1]['nodes'][0]['nodes']:
if i.get('nodes') and i['nodes'][0].get('data'):
i=i['nodes'][0]['data']
if i.get('assignId'):
vlist.append({
'vod_id': i['assignId'],
'vod_name': i.get('title'),
'vod_pic': i.get('vImg') or i.get('img'),
'vod_year': i.get('mark',{}).get('data',{}).get('text'),
'vod_remarks': i.get('summary')
})
return {'list': vlist}
except Exception as e:
print(f"处理主页视频数据时出错: {str(e)}")
return {'list': []}
def categoryContent(self, tid, pg, filter, extend):
result = {}
vlist = []
result['page'] = pg
result['limit'] = 90
result['total'] = 999999
pagecount = 9999
params = {'type': tid}
id = self.typeid[tid]
params.update(extend)
if pg == '1':
id=self.cf(params)
data=self.session.get(f'{self.host}/category/data?session={id}&params={quote(json.dumps(params))}&pageNo={pg}').json()
try:
data=data['data']['filterData']
for i in data['listData']:
if i.get('videoLink') and 's=' in i['videoLink']:
vlist.append({
'vod_id': i.get('videoLink').split('s=')[-1],
'vod_name': i.get('title'),
'vod_pic': i.get('img'),
'vod_year': i.get('rightTagText'),
'vod_remarks': i.get('summary')
})
self.typeid[tid]=quote(json.dumps(data['session']))
except:
pagecount=pg
result['list'] = vlist
result['pagecount'] = pagecount
return result
def detailContent(self, ids):
try:
data=self.session.get(f'{self.ihost}/v_getvideo_info/?showId={ids[0]}').json()
v=data['data']
vod = {
'type_name': v.get('showVideotype'),
'vod_year': v.get('lastUpdate'),
'vod_remarks': v.get('rc_title'),
'vod_actor': v.get('_personNameStr'),
'vod_content': v.get('showdesc'),
'vod_play_from': '优酷',
'vod_play_url': ''
}
params={"biz":"new_detail_web2","videoId":v.get('vid'),"scene":"web_page","componentVersion":"3","ip":data.get('ip'),"debug":0,"utdid":"ZYmGMAAAACkDAMU8hbiMmYdd","userId":0,"platform":"pc","nextSession":"","gray":0,"source":"pcNoPrev","showId":ids[0]}
sdata,index=self.getinfo(params)
pdata=sdata['nodes']
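# The first response carries only one batch of episodes; when the total count (index) exceeds it, fetch the remaining batches concurrently and keep them in order.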
if index > len(pdata):
batch_size = len(pdata)
total_batches = ((index + batch_size - 1) // batch_size) - 1
ssj = json.loads(sdata['data']['session'])
with ThreadPoolExecutor(max_workers=total_batches) as executor:
futures = []
for batch in range(total_batches):
start = batch_size + 1 + (batch * batch_size)
end = start + batch_size - 1
next_session = ssj.copy()
next_session.update({
"itemStartStage": start,
"itemEndStage": min(end, index)
})
current_params = params.copy()
current_params['nextSession'] = json.dumps(next_session)
futures.append((start, executor.submit(self.getvinfo, current_params)))
futures.sort(key=lambda x: x[0])
for _, future in futures:
try:
result = future.result()
pdata.extend(result['nodes'])
except Exception as e:
print(f"Error fetching data: {str(e)}")
vod['vod_play_url'] = '#'.join([f"{i['data'].get('title')}${i['data']['action'].get('value')}" for i in pdata])
return {'list': [vod]}
except Exception as e:
print(e)
return {'list': [{'vod_play_from': '哎呀翻车啦', 'vod_play_url': f'呜呜呜${self.host}'}]}
def searchContent(self, key, quick, pg="1"):
data=self.session.get(f'{self.shost}/api/search?pg={pg}&keyword={key}').json()
vlist = []
for i in data['pageComponentList']:
if i.get('commonData') and (i['commonData'].get('showId') or i['commonData'].get('realShowId')):
i=i['commonData']
vlist.append({
'vod_id': i.get('showId') or i.get('realShowId'),
'vod_name': i['titleDTO'].get('displayName'),
'vod_pic': i['posterDTO'].get('vThumbUrl'),
'vod_year': i.get('feature'),
'vod_remarks': i.get('updateNotice')
})
return {'list': vlist, 'page': pg}
def playerContent(self, flag, id, vipFlags):
return {'jx':1,'parse': 1, 'url': f"{self.ihost}/video?vid={id}", 'header': ''}
def localProxy(self, param):
pass
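# Request the category page once to obtain the pagination session; with b=True also return the parsed filter list used by homeContent.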
def cf(self,params,b=False):
response = self.session.get(f'{self.host}/category/data?params={quote(json.dumps(params))}&optionRefresh=1&pageNo=1').json()
data=response['data']['filterData']
session=quote(json.dumps(data['session']))
if b:
return session,self.get_filter_data(data['filter']['filterData'][1:])
return session
def process_key(self, key):
if '_' not in key:
return key
parts = key.split('_')
result = parts[0]
for part in parts[1:]:
if part:
result += part[0].upper() + part[1:]
return result
def get_filter_data(self, data):
result = []
try:
for item in data:
if not item.get('subFilter'):
continue
first_sub = item['subFilter'][0]
if not first_sub.get('filterType'):
continue
filter_item = {
'key': self.process_key(first_sub['filterType']),
'name': first_sub['title'],
'value': []
}
for sub in item['subFilter']:
if 'value' in sub:
filter_item['value'].append({
'n': sub['title'],
'v': sub['value']
})
if filter_item['value']:
result.append(filter_item)
except Exception as e:
print(f"处理筛选数据时出错: {str(e)}")
return result
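# Warm-up request whose response sets the _m_h5_tk cookie that the mtop signature below depends on.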
def get_ctoken(self):
data=self.session.get(f'{self.h5host}/h5/mtop.ykrec.recommendservice.recommend/1.0/?jsv=2.6.1&appKey=24679788')
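# mtop signature: MD5 of "<token>&<t>&<appKey>&<data>", where the token is the first segment of the _m_h5_tk cookie.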
def md5(self,t,text):
h = MD5.new()
token=self.session.cookies.get('_m_h5_tk').split('_')[0]
data=f"{token}&{t}&24679788&{text}"
h.update(data.encode('utf-8'))
return h.hexdigest()
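# Send a signed mtop request; if the response reports an expired token (令牌过期), refresh the cookie via get_ctoken() and retry, up to max_recursion times.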
def getdata(self, url, params, recursion_count=0, max_recursion=3):
data = json.dumps(params)
t = int(time.time() * 1000)
jsdata = {
'appKey': '24679788',
't': t,
'sign': self.md5(t, data),
'data': data
}
response = self.session.get(url, params=jsdata)
if '令牌过期' in response.text:
if recursion_count >= max_recursion:
raise Exception("达到最大递归次数,无法继续请求")
self.get_ctoken()
return self.getdata(url, params, recursion_count + 1, max_recursion)
else:
return response.json()
def getvinfo(self,params):
body = {
"ms_codes": "2019030100",
"params": json.dumps(params),
"system_info": "{\"os\":\"iku\",\"device\":\"iku\",\"ver\":\"9.2.9\",\"appPackageKey\":\"com.youku.iku\",\"appPackageId\":\"pcweb\"}"
}
data = self.getdata(f'{self.h5host}/h5/mtop.youku.columbus.gateway.new.execute/1.0/', body)
okey = list(data['data'].keys())[0]
i = data['data'][okey]['data']
return i
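# Locate the episode-list node; for movies and games (电影/游戏) it sits at a different index. Also return the total episode count.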
def getinfo(self,params):
i = self.getvinfo(params)
jdata=i['nodes'][0]['nodes'][3]
info=i['data']['extra']['episodeTotal']
if i['data']['extra']['showCategory'] in ['电影','游戏']:
jdata = i['nodes'][0]['nodes'][4]
return jdata,info


@@ -1,248 +0,0 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import random
import sys
from base64 import b64encode, b64decode
from concurrent.futures import ThreadPoolExecutor, as_completed
from urllib.parse import urlencode
sys.path.append('..')
from base.spider import Spider
class Spider(Spider):
def init(self, extend=""):
self.did = self.random_str(32)
pass
def getName(self):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
rhost = 'https://www.iqiyi.com'
hhost='https://mesh.if.iqiyi.com'
dhost='https://miniapp.iqiyi.com'
headers = {
'Origin': rhost,
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36',
'Referer': f'{rhost}/',
}
def homeContent(self, filter):
result = {}
cateManual = {
"全部": "1009",
"电影": "1",
"剧集": "2",
"综艺": "6",
"动漫": "4",
"儿童": "15",
"微剧": "35",
"纪录片": "3"
}
classes = []
filters = {}
for k in cateManual:
classes.append({
'type_name': k,
'type_id': cateManual[k]
})
with ThreadPoolExecutor(max_workers=len(classes)) as executor:
results = executor.map(self.getf, classes)
for id, ft in results:
if len(ft):filters[id] = ft
result['class'] = classes
result['filters'] = filters
return result
def homeVideoContent(self):
data=self.fetch(f'{self.hhost}/portal/lw/v5/channel/recommend?v=13.014.21150', headers=self.headers).json()
vlist = []
for i in data['items'][1:]:
for j in i['video'][0]['data']:
id = j.get('firstId')
pic=j.get('prevue',{}).get('image_url') or j.get('album_image_url_hover')
if id and pic:
pu=j.get('prevue',{}).get('page_url') or j.get('page_url').split('?')[0]
id = f'{id}@{self.e64(pu)}'
vlist.append({
'vod_id': id,
'vod_name': j.get('display_name'),
'vod_pic': pic,
'vod_year': j.get('sns_score'),
'vod_remarks': j.get('dq_updatestatus') or j.get('rank_prefix')
})
return {'list':vlist}
def categoryContent(self, tid, pg, filter, extend):
if pg == "1":
self.sid = ''
new_data = {'mode':'24'}
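# Each filter value is a base64-encoded "k=v,k=v" string; decode it and merge it into the query dict, comma-joining values for repeated keys.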
for key, value in extend.items():
if value:
key_value_pairs = self.d64(value).split(',')
for pair in key_value_pairs:
k, v = pair.split('=')
if k in new_data:
new_data[k] += "," + v
else:
new_data[k] = v
path=f'/portal/lw/videolib/data?uid=&passport_id=&ret_num=60&version=13.014.21150&device_id={self.did}&channel_id={tid}&page_id={pg}&session={self.sid}&os=&conduit_id=&vip=0&auth&recent_selected_tag=&ad=%5B%7B%22lm%22:%225%22,%22ai%22:%225%22,%22fp%22:%226%22,%22sei%22:%22Sa867aa9d326e2bd8654d8c2a8636055e%22,%22position%22:%22library%22%7D%5D&adExt=%7B%22r%22:%221.2.1-ares6-pure%22%7D&dfp=a12f96215b2f7842a98c082799ca0c3d9236be00946701b106829754d8ece3aaf8&filter={urlencode(new_data)}'
data=self.fetch(f'{self.hhost}{path}', headers=self.headers).json()
self.sid = data['session']
videos = []
for i in data['data']:
id = i.get('firstId') or i.get('tv_id')
if not id:
id=i.get('play_url').split(';')[0].split('=')[-1]
if id and not i.get('h'):
id=f'{id}@{self.e64(i.get("page_url"))}'
videos.append({
'vod_id': id,
'vod_name': i.get('display_name'),
'vod_pic': i.get('album_image_url_hover'),
'vod_year': i.get('sns_score'),
'vod_remarks': i.get('dq_updatestatus') or i.get('pay_mark')
})
result = {}
result['list'] = videos
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['total'] = 999999
return result
def detailContent(self, ids):
ids = ids[0].split('@')
ids[-1] = self.d64(ids[-1])
data = self.fetch(f'{self.dhost}/h5/mina/baidu/play/body/v1/{ids[0]}/', headers=self.headers).json()
v=data['data']['playInfo']
vod = {
'vod_name': v.get('albumName'),
'type_name': v.get('tags'),
'vod_year': v.get('albumYear'),
'vod_remarks': v.get('updateStrategy'),
'vod_actor': v.get('mainActors'),
'vod_director': v.get('directors'),
'vod_content': v.get('albumDesc'),
'vod_play_from': '爱奇艺',
'vod_play_url': ''
}
if data.get('data') and data['data'].get('videoList') and data['data']['videoList'].get('videos'):
purl=[f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
pg=data['data']['videoList'].get('totalPages')
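# When the episode list spans multiple pages, fetch pages 2..N concurrently and append them in page order.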
if pg and pg > 1:
id = v['albumId']
pages = list(range(2, pg + 1))
page_results = {}
with ThreadPoolExecutor(max_workers=10) as executor:
future_to_page = {
executor.submit(self.fetch_page_data, page, id): page
for page in pages
}
for future in as_completed(future_to_page):
page = future_to_page[future]
try:
result = future.result()
page_results[page] = result
except Exception as e:
print(f"Error fetching page {page}: {e}")
for page in sorted(page_results.keys()):
purl.extend(page_results[page])
vod['vod_play_url'] = '#'.join(purl)
else:
vdata=self.fetch(f'{self.dhost}/h5/mina/baidu/play/head/v1/{ids[0]}/', headers=self.headers).json()
v=vdata['data']['playInfo']
vod = {
'vod_name': v.get('shortTitle'),
'type_name': v.get('channelName'),
'vod_year': v.get('year'),
'vod_remarks': v.get('focus'),
'vod_actor': v.get('mainActors'),
'vod_director': v.get('directors'),
'vod_content': v.get('desc'),
'vod_play_from': '爱奇艺',
'vod_play_url': f'{v.get("shortTitle")}${ids[-1]}'
}
return {'list':[vod]}
def searchContent(self, key, quick, pg="1"):
data=self.fetch(f'{self.hhost}/portal/lw/search/homePageV3?key={key}&current_page={pg}&mode=1&source=input&suggest=&version=13.014.21150&pageNum={pg}&pageSize=25&pu=&u={self.did}&scale=150&token=&userVip=0&conduit=&vipType=-1&os=&osShortName=win10&dataType=&appMode=', headers=self.headers).json()
videos = []
vdata=data['data']['templates']
for i in data['data']['templates']:
if i.get('intentAlbumInfos'):
vdata=[{'albumInfo': c} for c in i['intentAlbumInfos']]+vdata
for i in vdata:
if i.get('albumInfo') and (i['albumInfo'].get('playQipuId','') or i['albumInfo'].get('qipuId')) and i['albumInfo'].get('pageUrl'):
b=i['albumInfo']
id=f"{(b.get('playQipuId','') or b.get('qipuId'))}@{self.e64(b.get('pageUrl'))}"
videos.append({
'vod_id': id,
'vod_name': b.get('title'),
'vod_pic': b.get('img'),
'vod_year': (b.get('year',{}) or {}).get('value'),
'vod_remarks': b.get('subscriptContent') or b.get('channel') or b.get('vipTips')
})
return {'list':videos,'page':pg}
def playerContent(self, flag, id, vipFlags):
return {'jx':1,'parse': 1, 'url': id, 'header': ''}
def localProxy(self, param):
pass
def fetch_page_data(self, page, id):
try:
url = f'{self.dhost}/h5/mina/avlist/{page}/{id}/'
data = self.fetch(url, headers=self.headers).json()
return [f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
except:
return []
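# Fetch the tag groups for one channel and convert them into the {key, name, value} filter structure used by homeContent.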
def getf(self,body):
data=self.fetch(f'{self.hhost}/portal/lw/videolib/tag?channel_id={body["type_id"]}&tagAdd=&selected_tag_name=&version=13.014.21150&device={self.did}&uid=', headers=self.headers).json()
ft = []
# for i in data[:-1]:
for i in data:
try:
value_array = [{"n": value['text'], "v": self.e64(value['tag_param'])} for value in i['tags'] if
value.get('tag_param')]
ft.append({"key": i['group'], "name": i['group'], "value": value_array})
except:
print(i)
return (body['type_id'], ft)
def e64(self, text):
try:
text_bytes = text.encode('utf-8')
encoded_bytes = b64encode(text_bytes)
return encoded_bytes.decode('utf-8')
except Exception as e:
print(f"Base64编码错误: {str(e)}")
return ""
def d64(self,encoded_text: str):
try:
encoded_bytes = encoded_text.encode('utf-8')
decoded_bytes = b64decode(encoded_bytes)
return decoded_bytes.decode('utf-8')
except Exception as e:
print(f"Base64解码错误: {str(e)}")
return ""
def random_str(self,length=16):
hex_chars = '0123456789abcdef'
return ''.join(random.choice(hex_chars) for _ in range(length))


@@ -1,323 +0,0 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import json
import sys
import uuid
import copy
sys.path.append('..')
from base.spider import Spider
from concurrent.futures import ThreadPoolExecutor, as_completed
class Spider(Spider):
def init(self, extend=""):
self.dbody = {
"page_params": {
"channel_id": "",
"filter_params": "sort=75",
"page_type": "channel_operation",
"page_id": "channel_list_second_page"
}
}
self.body = self.dbody
pass
def getName(self):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
host = 'https://v.qq.com'
apihost = 'https://pbaccess.video.qq.com'
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5410.0 Safari/537.36',
'origin': host,
'referer': f'{host}/'
}
def homeContent(self, filter):
cdata = {
"电视剧": "100113",
"电影": "100173",
"综艺": "100109",
"纪录片": "100105",
"动漫": "100119",
"少儿": "100150",
"短剧": "110755"
}
result = {}
classes = []
filters = {}
for k in cdata:
classes.append({
'type_name': k,
'type_id': cdata[k]
})
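# Query each channel's filter page concurrently and collect its index_item entries into per-channel filter options.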
with ThreadPoolExecutor(max_workers=len(classes)) as executor:
futures = [executor.submit(self.get_filter_data, item['type_id']) for item in classes]
for future in futures:
cid, data = future.result()
if not data.get('data', {}).get('module_list_datas'):
continue
filter_dict = {}
try:
items = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
for item in items:
if not item.get('item_params', {}).get('index_item_key'):
continue
params = item['item_params']
filter_key = params['index_item_key']
if filter_key not in filter_dict:
filter_dict[filter_key] = {
'key': filter_key,
'name': params['index_name'],
'value': []
}
filter_dict[filter_key]['value'].append({
'n': params['option_name'],
'v': params['option_value']
})
except (IndexError, KeyError):
continue
filters[cid] = list(filter_dict.values())
result['class'] = classes
result['filters'] = filters
return result
def homeVideoContent(self):
json_data = {'page_context':None,'page_params':{'page_id':'100101','page_type':'channel','skip_privacy_types':'0','support_click_scan':'1','new_mark_label_enabled':'1','ams_cookies':'',},'page_bypass_params':{'params':{'caller_id':'','data_mode':'default','page_id':'','page_type':'channel','platform_id':'2','user_mode':'default',},'scene':'channel','abtest_bypass_id':'',}}
data = self.post(f'{self.apihost}/trpc.vector_layout.page_view.PageService/getPage',headers=self.headers, json=json_data).json()
vlist = []
for it in data['data']['CardList'][0]['children_list']['list']['cards']:
if it.get('params'):
p = it['params']
tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
id = it.get('id') or p.get('cid')
name = p.get('mz_title') or p.get('title')
if name and 'http' not in id:
vlist.append({
'vod_id': id,
'vod_name': name,
'vod_pic': p.get('image_url'),
'vod_year': tag.get('tag_2', {}).get('text'),
'vod_remarks': tag.get('tag_4', {}).get('text')
})
return {'list': vlist}
def categoryContent(self, tid, pg, filter, extend):
result = {}
params = {
"sort": extend.get('sort', '75'),
"attr": extend.get('attr', '-1'),
"itype": extend.get('itype', '-1'),
"ipay": extend.get('ipay', '-1'),
"iarea": extend.get('iarea', '-1'),
"iyear": extend.get('iyear', '-1'),
"theater": extend.get('theater', '-1'),
"award": extend.get('award', '-1'),
"recommend": extend.get('recommend', '-1')
}
if pg == '1':
self.body = self.dbody.copy()
self.body['page_params']['channel_id'] = tid
self.body['page_params']['filter_params'] = self.json_to_params(params)
data = self.post(
f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
json=self.body, headers=self.headers).json()
ndata = data['data']
if ndata['has_next_page']:
result['pagecount'] = 9999
self.body['page_context'] = ndata['next_page_context']
else:
result['pagecount'] = int(pg)
vlist = []
for its in ndata['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']:
id = its.get('item_params', {}).get('cid')
if id:
p = its['item_params']
tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
name = p.get('mz_title') or p.get('title')
pic = p.get('new_pic_hz') or p.get('new_pic_vt')
vlist.append({
'vod_id': id,
'vod_name': name,
'vod_pic': pic,
'vod_year': tag.get('tag_2', {}).get('text'),
'vod_remarks': tag.get('tag_4', {}).get('text')
})
result['list'] = vlist
result['page'] = pg
result['limit'] = 90
result['total'] = 999999
return result
def detailContent(self, ids):
vbody = {"page_params":{"req_from":"web","cid":ids[0],"vid":"","lid":"","page_type":"detail_operation","page_id":"detail_page_introduction"},"has_cache":1}
body = {"page_params":{"req_from":"web_vsite","page_id":"vsite_episode_list","page_type":"detail_operation","id_type":"1","page_size":"","cid":ids[0],"vid":"","lid":"","page_num":"","page_context":"","detail_page_type":"1"},"has_cache":1}
with ThreadPoolExecutor(max_workers=2) as executor:
future_detail = executor.submit(self.get_vdata, vbody)
future_episodes = executor.submit(self.get_vdata, body)
vdata = future_detail.result()
data = future_episodes.result()
pdata = self.process_tabs(data, body, ids)
if not pdata:
return self.handle_exception(None, "No pdata available")
try:
star_list = vdata['data']['module_list_datas'][0]['module_datas'][0]['item_data_lists']['item_datas'][
0].get('sub_items', {}).get('star_list', {}).get('item_datas', [])
actors = [star['item_params']['name'] for star in star_list]
names = ['腾讯视频', '预告片']
plist, ylist = self.process_pdata(pdata, ids)
if not plist:
del names[0]
if not ylist:
del names[1]
vod = self.build_vod(vdata, actors, plist, ylist, names)
return {'list': [vod]}
except Exception as e:
return self.handle_exception(e, "Error processing detail")
def searchContent(self, key, quick, pg="1"):
headers = self.headers.copy()
headers.update({'Content-Type': 'application/json'})
body = {'version':'25021101','clientType':1,'filterValue':'','uuid':str(uuid.uuid4()),'retry':0,'query':key,'pagenum':int(pg)-1,'pagesize':30,'queryFrom':0,'searchDatakey':'','transInfo':'','isneedQc':True,'preQid':'','adClientInfo':'','extraInfo':{'isNewMarkLabel':'1','multi_terminal_pc':'1','themeType':'1',},}
data = self.post(f'{self.apihost}/trpc.videosearch.mobile_search.MultiTerminalSearch/MbSearch?vplatform=2',
json=body, headers=headers).json()
vlist = []
vname=["电视剧", "电影", "综艺", "纪录片", "动漫", "少儿", "短剧"]
v=data['data']['normalList']['itemList']
d=data['data']['areaBoxList'][0]['itemList']
q=v+d
if v[0].get('doc') and v[0]['doc'].get('id') =='MainNeed':q=d+v
for k in q:
if k.get('doc') and k.get('videoInfo') and k['doc'].get('id') and '外站' not in k['videoInfo'].get('subTitle') and k['videoInfo'].get('title') and k['videoInfo'].get('typeName') in vname:
img_tag = k.get('videoInfo', {}).get('imgTag')
if img_tag is not None and isinstance(img_tag, str):
try:
tag = json.loads(img_tag)
except json.JSONDecodeError as e:
tag = {}
else:
tag = {}
pic = k.get('videoInfo', {}).get('imgUrl')
vlist.append({
'vod_id': k['doc']['id'],
'vod_name': self.removeHtmlTags(k['videoInfo']['title']),
'vod_pic': pic,
'vod_year': k['videoInfo'].get('typeName') +' '+ tag.get('tag_2', {}).get('text', ''),
'vod_remarks': tag.get('tag_4', {}).get('text', '')
})
return {'list': vlist, 'page': pg}
def playerContent(self, flag, id, vipFlags):
ids = id.split('@')
url = f"{self.host}/x/cover/{ids[0]}/{ids[1]}.html"
return {'jx':1,'parse': 1, 'url': url, 'header': ''}
def localProxy(self, param):
pass
def get_filter_data(self, cid):
hbody = self.dbody.copy()
hbody['page_params']['channel_id'] = cid
data = self.post(
f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
json=hbody, headers=self.headers).json()
return cid, data
def get_vdata(self, body):
try:
vdata = self.post(
f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=3000010&vplatform=2&vversion_name=8.2.96',
json=body, headers=self.headers
).json()
return vdata
except Exception as e:
print(f"Error in get_vdata: {str(e)}")
return {'data': {'module_list_datas': []}}
def process_pdata(self, pdata, ids):
plist = []
ylist = []
for k in pdata:
if k.get('item_id'):
pid = f"{k['item_params']['union_title']}${ids[0]}@{k['item_id']}"
if '预告' in k['item_params']['union_title']:
ylist.append(pid)
else:
plist.append(pid)
return plist, ylist
def build_vod(self, vdata, actors, plist, ylist, names):
d = vdata['data']['module_list_datas'][0]['module_datas'][0]['item_data_lists']['item_datas'][0]['item_params']
urls = []
if plist:
urls.append('#'.join(plist))
if ylist:
urls.append('#'.join(ylist))
vod = {
'type_name': d.get('sub_genre', ''),
'vod_name': d.get('title', ''),
'vod_year': d.get('year', ''),
'vod_area': d.get('area_name', ''),
'vod_remarks': d.get('holly_online_time', '') or d.get('hotval', ''),
'vod_actor': ','.join(actors),
'vod_content': d.get('cover_description', ''),
'vod_play_from': '$$$'.join(names),
'vod_play_url': '$$$'.join(urls)
}
return vod
def handle_exception(self, e, message):
print(f"{message}: {str(e)}")
return {'list': [{'vod_play_from': '哎呀翻车啦', 'vod_play_url': '翻车啦#555'}]}
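# Episodes beyond the first tab are fetched concurrently using each tab's page_context and merged back in tab order.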
def process_tabs(self, data, body, ids):
try:
pdata = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
tabs = data['data']['module_list_datas'][-1]['module_datas'][-1]['module_params'].get('tabs')
if tabs and len(json.loads(tabs)):
tabs = json.loads(tabs)
remaining_tabs = tabs[1:]
task_queue = []
for tab in remaining_tabs:
nbody = copy.deepcopy(body)
nbody['page_params']['page_context'] = tab['page_context']
task_queue.append(nbody)
with ThreadPoolExecutor(max_workers=10) as executor:
future_map = {executor.submit(self.get_vdata, task): idx for idx, task in enumerate(task_queue)}
results = [None] * len(task_queue)
for future in as_completed(future_map.keys()):
idx = future_map[future]
results[idx] = future.result()
for result in results:
if result:
page_data = result['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists'][
'item_datas']
pdata.extend(page_data)
return pdata
except Exception as e:
print(f"Error processing episodes: {str(e)}")
return []
def json_to_params(self, params, skip_empty=False):
query = []
for k, v in params.items():
if skip_empty and not v:
continue
query.append(f"{k}={v}")
return "&".join(query)


@@ -1,205 +0,0 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import sys
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
sys.path.append('..')
from base.spider import Spider
class Spider(Spider):
def init(self, extend=""):
pass
def getName(self):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
rhost='https://www.mgtv.com'
host='https://pianku.api.mgtv.com'
vhost='https://pcweb.api.mgtv.com'
mhost='https://dc.bz.mgtv.com'
shost='https://mobileso.bz.mgtv.com'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
'origin': rhost,
'referer': f'{rhost}/'
}
def homeContent(self, filter):
result = {}
cateManual = {
"电影": "3",
"电视剧": "2",
"综艺": "1",
"动画": "50",
"少儿": "10",
"纪录片": "51",
"教育": "115"
}
classes = []
filters = {}
for k in cateManual:
classes.append({
'type_name': k,
'type_id': cateManual[k]
})
with ThreadPoolExecutor(max_workers=len(classes)) as executor:
results = executor.map(self.getf, classes)
for id, ft in results:
if len(ft):filters[id] = ft
result['class'] = classes
result['filters'] = filters
return result
def homeVideoContent(self):
data=self.fetch(f'{self.mhost}/dynamic/v1/channel/index/0/0/0/1000000/0/0/17/1354?type=17&version=5.0&t={str(int(time.time()*1000))}&_support=10000000', headers=self.headers).json()
videoList = []
for i in data['data']:
if i.get('DSLList') and len(i['DSLList']):
for j in i['DSLList']:
if j.get('data') and j['data'].get('items') and len(j['data']['items']):
for k in j['data']['items']:
videoList.append({
'vod_id': k["videoId"],
'vod_name': k['videoName'],
'vod_pic': k['img'],
'vod_year': k.get('cornerTitle'),
'vod_remarks': k.get('time') or k.get('desc'),
})
return {'list':videoList}
def categoryContent(self, tid, pg, filter, extend):
body={
'allowedRC': '1',
'platform': 'pcweb',
'channelId': tid,
'pn': pg,
'pc': '80',
'hudong': '1',
'_support': '10000000'
}
body.update(extend)
data=self.fetch(f'{self.host}/rider/list/pcweb/v3', params=body, headers=self.headers).json()
videoList = []
for i in data['data']['hitDocs']:
videoList.append({
'vod_id': i["playPartId"],
'vod_name': i['title'],
'vod_pic': i['img'],
'vod_year': (i.get('rightCorner',{}) or {}).get('text') or i.get('year'),
'vod_remarks': i['updateInfo']
})
result = {}
result['list'] = videoList
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['total'] = 999999
return result
def detailContent(self, ids):
vbody={'allowedRC': '1', 'vid': ids[0], 'type': 'b', '_support': '10000000'}
vdata=self.fetch(f'{self.vhost}/video/info', params=vbody, headers=self.headers).json()
d=vdata['data']['info']['detail']
vod = {
'vod_name': vdata['data']['info']['title'],
'type_name': d.get('kind'),
'vod_year': d.get('releaseTime'),
'vod_area': d.get('area'),
'vod_lang': d.get('language'),
'vod_remarks': d.get('updateInfo'),
'vod_actor': d.get('leader'),
'vod_director': d.get('director'),
'vod_content': d.get('story'),
'vod_play_from': '芒果TV',
'vod_play_url': ''
}
data,pdata=self.fetch_page_data('1', ids[0],True)
pagecount=data['data'].get('total_page') or 1
if int(pagecount)>1:
pages = list(range(2, pagecount+1))
page_results = {}
with ThreadPoolExecutor(max_workers=10) as executor:
future_to_page = {
executor.submit(self.fetch_page_data, page, ids[0]): page
for page in pages
}
for future in as_completed(future_to_page):
page = future_to_page[future]
try:
result = future.result()
page_results[page] = result
except Exception as e:
print(f"Error fetching page {page}: {e}")
for page in sorted(page_results.keys()):
pdata.extend(page_results[page])
vod['vod_play_url'] = '#'.join(pdata)
return {'list':[vod]}
def searchContent(self, key, quick, pg="1"):
data=self.fetch(f'{self.shost}/applet/search/v1?channelCode=mobile-wxap&q={key}&pn={pg}&pc=10&_support=10000000', headers=self.headers).json()
videoList = []
for i in data['data']['contents']:
if i.get('data') and len(i['data']):
k = i['data'][0]
if k.get('vid') and k.get('img'):
try:
videoList.append({
'vod_id': k['vid'],
'vod_name': k['title'],
'vod_pic': k['img'],
'vod_year': (i.get('rightTopCorner',{}) or {}).get('text') or i.get('year'),
'vod_remarks': '/'.join(i.get('desc',[])),
})
except:
print(k)
return {'list':videoList,'page':pg}
def playerContent(self, flag, id, vipFlags):
id=f'{self.rhost}{id}'
return {'jx':1,'parse': 1, 'url': id, 'header': ''}
def localProxy(self, param):
pass
def getf(self, body):
params = {
'allowedRC': '1',
'channelId': body['type_id'],
'platform': 'pcweb',
'_support': '10000000',
}
data = self.fetch(f'{self.host}/rider/config/channel/v1', params=params, headers=self.headers).json()
ft = []
for i in data['data']['listItems']:
try:
value_array = [{"n": value['tagName'], "v": value['tagId']} for value in i['items'] if
value.get('tagName')]
ft.append({"key": i['eName'], "name": i['typeName'], "value": value_array})
except:
print(i)
return body['type_id'], ft
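# Fetch one page of the episode list as "title$url" entries; with b=True also return the raw response so the caller can read total_page.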
def fetch_page_data(self, page, id, b=False):
body = {'version': '5.5.35', 'video_id': id, 'page': page, 'size': '30',
'platform': '4', 'src': 'mgtv', 'allowedRC': '1', '_support': '10000000'}
data = self.fetch(f'{self.vhost}/episode/list', params=body, headers=self.headers).json()
ldata = [f'{i["t3"]}${i["url"]}' for i in data['data']['list']]
if b:
return data, ldata
else:
return ldata


@@ -1,225 +0,0 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import json
import sys
import threading
import uuid
import requests
sys.path.append('..')
from base.spider import Spider
import time
from Crypto.Hash import MD5, SHA1
class Spider(Spider):
'''
Configuration example:
{
"key": "xxxx",
"name": "xxxx",
"type": 3,
"api": ".所在路径/金牌.py",
"searchable": 1,
"quickSearch": 1,
"filterable": 1,
"changeable": 1,
"ext": {
"site": "https://www.jiabaide.cn,域名2,域名3"
}
},
'''
def init(self, extend=""):
if extend:
hosts=json.loads(extend)['site']
self.host = self.host_late(hosts)
pass
def getName(self):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
def homeContent(self, filter):
cdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/get/filer/type", headers=self.getheaders()).json()
fdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/get/filer/list", headers=self.getheaders()).json()
result = {}
classes = []
filters={}
for k in cdata['data']:
classes.append({
'type_name': k['typeName'],
'type_id': str(k['typeId']),
})
sort_values = [{"n": "最近更新", "v": "2"},{"n": "人气高低", "v": "3"}, {"n": "评分高低", "v": "4"}]
for tid, d in fdata['data'].items():
current_sort_values = sort_values.copy()
if tid == '1':
del current_sort_values[0]
filters[tid] = [
{"key": "type", "name": "类型",
"value": [{"n": i["itemText"], "v": i["itemValue"]} for i in d["typeList"]]},
*([] if not d["plotList"] else [{"key": "v_class", "name": "剧情",
"value": [{"n": i["itemText"], "v": i["itemText"]}
for i in d["plotList"]]}]),
{"key": "area", "name": "地区",
"value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["districtList"]]},
{"key": "year", "name": "年份",
"value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["yearList"]]},
{"key": "lang", "name": "语言",
"value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["languageList"]]},
{"key": "sort", "name": "排序", "value": current_sort_values}
]
result['class'] = classes
result['filters'] = filters
return result
def homeVideoContent(self):
data1 = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/home/all/list", headers=self.getheaders()).json()
data2=self.fetch(f"{self.host}/api/mw-movie/anonymous/home/hotSearch",headers=self.getheaders()).json()
data=[]
for i in data1['data'].values():
data.extend(i['list'])
data.extend(data2['data'])
vods=self.getvod(data)
return {'list':vods}
def categoryContent(self, tid, pg, filter, extend):
params = {
"area": extend.get('area', ''),
"filterStatus": "1",
"lang": extend.get('lang', ''),
"pageNum": pg,
"pageSize": "30",
"sort": extend.get('sort', '1'),
"sortBy": "1",
"type": extend.get('type', ''),
"type1": tid,
"v_class": extend.get('v_class', ''),
"year": extend.get('year', '')
}
data = self.fetch(f"{self.host}/api/mw-movie/anonymous/video/list?{self.js(params)}", headers=self.getheaders(params)).json()
result = {}
result['list'] = self.getvod(data['data']['list'])
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['total'] = 999999
return result
def detailContent(self, ids):
data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/detail?id={ids[0]}",headers=self.getheaders({'id':ids[0]})).json()
vod=self.getvod([data['data']])[0]
vod['vod_play_from']='金牌'
vod['vod_play_url'] = '#'.join(
f"{i['name'] if len(vod['episodelist']) > 1 else vod['vod_name']}${ids[0]}@@{i['nid']}" for i in
vod['episodelist'])
vod.pop('episodelist', None)
return {'list':[vod]}
def searchContent(self, key, quick, pg="1"):
params = {
"keyword": key,
"pageNum": pg,
"pageSize": "8",
"sourceCode": "1"
}
data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/searchByWord?{self.js(params)}",headers=self.getheaders(params)).json()
vods=self.getvod(data['data']['result']['list'])
return {'list':vods,'page':pg}
def playerContent(self, flag, id, vipFlags):
self.header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
'sec-ch-ua-platform': '"Windows"',
'DNT': '1',
'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
'sec-ch-ua-mobile': '?0',
'Origin': self.host,
'Referer': f'{self.host}/'
}
ids=id.split('@@')
pdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v2/video/episode/url?clientType=1&id={ids[0]}&nid={ids[1]}",headers=self.getheaders({'clientType':'1','id': ids[0], 'nid': ids[1]})).json()
vlist=[]
for i in pdata['data']['list']:vlist.extend([i['resolutionName'],i['url']])
return {'parse':0,'url':vlist,'header':self.header}
def localProxy(self, param):
pass
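# Pick the fastest mirror: send a HEAD request to every candidate host in parallel and return the one with the lowest latency.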
def host_late(self, url_list):
if isinstance(url_list, str):
urls = [u.strip() for u in url_list.split(',')]
else:
urls = url_list
if len(urls) <= 1:
return urls[0] if urls else ''
results = {}
threads = []
def test_host(url):
try:
start_time = time.time()
response = requests.head(url, timeout=1.0, allow_redirects=False)
delay = (time.time() - start_time) * 1000
results[url] = delay
except Exception as e:
results[url] = float('inf')
for url in urls:
t = threading.Thread(target=test_host, args=(url,))
threads.append(t)
t.start()
for t in threads:
t.join()
return min(results.items(), key=lambda x: x[1])[0]
def md5(self, sign_key):
md5_hash = MD5.new()
md5_hash.update(sign_key.encode('utf-8'))
md5_result = md5_hash.hexdigest()
return md5_result
def js(self, param):
return '&'.join(f"{k}={v}" for k, v in param.items())
def getheaders(self, param=None):
if param is None:param = {}
t=str(int(time.time()*1000))
param['key']='cb808529bae6b6be45ecfab29a4889bc'
param['t']=t
sha1_hash = SHA1.new()
sha1_hash.update(self.md5(self.js(param)).encode('utf-8'))
sign = sha1_hash.hexdigest()
deviceid = str(uuid.uuid4())
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
'Accept': 'application/json, text/plain, */*',
'sign': sign,
't': t,
'deviceid':deviceid
}
return headers
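# Normalize API field names (e.g. vodName -> vod_name, typeName -> type_name) to the standard vod keys.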
def convert_field_name(self, field):
field = field.lower()
if field.startswith('vod') and len(field) > 3:
field = field.replace('vod', 'vod_')
if field.startswith('type') and len(field) > 4:
field = field.replace('type', 'type_')
return field
def getvod(self, array):
return [{self.convert_field_name(k): v for k, v in item.items()} for item in array]


@@ -1,220 +0,0 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import json
import random
import sys
from base64 import b64encode, b64decode
from concurrent.futures import ThreadPoolExecutor
sys.path.append('..')
from base.spider import Spider
class Spider(Spider):
def init(self, extend=""):
did=self.getdid()
self.headers.update({'deviceId': did})
token=self.gettk()
self.headers.update({'token': token})
pass
def getName(self):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
host='http://ldys.sq1005.top'
headers = {
'User-Agent': 'okhttp/4.12.0',
'client': 'app',
'deviceType': 'Android'
}
def homeContent(self, filter):
data=self.post(f"{self.host}/api/v1/app/screen/screenType", headers=self.headers).json()
result = {}
cate = {
"类型": "classify",
"地区": "region",
"年份": "year"
}
sort={
'key':'sreecnTypeEnum',
'name': '排序',
'value':[{'n':'最新','v':'NEWEST'},{'n':'人气','v':'POPULARITY'},{'n':'评分','v':'COLLECT'},{'n':'热搜','v':'HOT'}]
}
classes = []
filters = {}
for k in data['data']:
classes.append({
'type_name': k['name'],
'type_id': k['id']
})
filters[k['id']] = []
for v in k['children']:
filters[k['id']].append({
'name': v['name'],
'key': cate[v['name']],
'value':[{'n':i['name'],'v':i['name']} for i in v['children']]
})
filters[k['id']].append(sort)
result['class'] = classes
result['filters'] = filters
return result
def homeVideoContent(self):
jdata={"condition":64,"pageNum":1,"pageSize":40}
data=self.post(f"{self.host}/api/v1/app/recommend/recommendSubList", headers=self.headers, json=jdata).json()
return {'list':self.getlist(data['data']['records'])}
def categoryContent(self, tid, pg, filter, extend):
jdata = {
'condition': {
'sreecnTypeEnum': 'NEWEST',
'typeId': tid,
},
'pageNum': int(pg),
'pageSize': 40,
}
jdata['condition'].update(extend)
data = self.post(f"{self.host}/api/v1/app/screen/screenMovie", headers=self.headers, json=jdata).json()
result = {}
result['list'] = self.getlist(data['data']['records'])
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['total'] = 999999
return result
def detailContent(self, ids):
ids = ids[0].split('@@')
jdata = {"id": int(ids[0]), "typeId": ids[-1]}
v = self.post(f"{self.host}/api/v1/app/play/movieDesc", headers=self.headers, json=jdata).json()
v = v['data']
vod = {
'type_name': v.get('classify'),
'vod_year': v.get('year'),
'vod_area': v.get('area'),
'vod_actor': v.get('star'),
'vod_director': v.get('director'),
'vod_content': v.get('introduce'),
'vod_play_from': '',
'vod_play_url': ''
}
c = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
l = c['data']['moviePlayerList']
n = {str(i['id']): i['moviePlayerName'] for i in l}
m = jdata.copy()
m.update({'playerId': str(l[0]['id'])})
pd = self.getv(m, c['data']['episodeList'])
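# When more than one player source exists, fetch the remaining sources' episode lists concurrently and merge them into pd.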
if len(l)-1:
with ThreadPoolExecutor(max_workers=len(l)-1) as executor:
future_to_player = {executor.submit(self.getd, jdata, player): player for player in l[1:]}
for future in future_to_player:
try:
o,p = future.result()
pd.update(self.getv(o,p))
except Exception as e:
print(f"请求失败: {e}")
w, e = [],[]
for i, x in pd.items():
if x:
w.append(n[i])
e.append(x)
vod['vod_play_from'] = '$$$'.join(w)
vod['vod_play_url'] = '$$$'.join(e)
return {'list': [vod]}
def searchContent(self, key, quick, pg="1"):
jdata={
"condition": {
"value": key
},
"pageNum": int(pg),
"pageSize": 40
}
data=self.post(f"{self.host}/api/v1/app/search/searchMovie", headers=self.headers, json=jdata).json()
return {'list':self.getlist(data['data']['records']),'page':pg}
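# The play id carries base64-encoded JSON of the episode request; resolve the final url via analysisMovieUrl, falling back to the raw movieDetails url.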
def playerContent(self, flag, id, vipFlags):
jdata=json.loads(self.d64(id))
data = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
try:
params={'playerUrl':data['data']['url'],'playerId':jdata['playerId']}
pd=self.fetch(f"{self.host}/api/v1/app/play/analysisMovieUrl", headers=self.headers, params=params).json()
url,p=pd['data'],0
except Exception as e:
print(f"请求失败: {e}")
url,p=data['data']['url'],0
return {'parse': p, 'url': url, 'header': {'User-Agent': 'okhttp/4.12.0'}}
def localProxy(self, param):
pass
def liveContent(self, url):
pass
def gettk(self):
data=self.fetch(f"{self.host}/api/v1/app/user/visitorInfo", headers=self.headers).json()
return data['data']['token']
def getdid(self):
did=self.getCache('ldid')
if not did:
hex_chars = '0123456789abcdef'
did =''.join(random.choice(hex_chars) for _ in range(16))
self.setCache('ldid',did)
return did
def getd(self,jdata,player):
x = jdata.copy()
x.update({'playerId': str(player['id'])})
response = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=x).json()
return x, response['data']['episodeList']
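# Build the play list for one source: each episode becomes "name$<base64-encoded request params>", keyed by playerId.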
def getv(self,d,c):
f={d['playerId']:''}
g=[]
for i in c:
j=d.copy()
j.update({'episodeId':str(i['id'])})
g.append(f"{i['episode']}${self.e64(json.dumps(j))}")
f[d['playerId']]='#'.join(g)
return f
def getlist(self,data):
videos = []
for i in data:
videos.append({
'vod_id': f"{i['id']}@@{i['typeId']}",
'vod_name': i.get('name'),
'vod_pic': i.get('cover'),
'vod_year': i.get('year'),
'vod_remarks': i.get('totalEpisode')
})
return videos
def e64(self, text):
try:
text_bytes = text.encode('utf-8')
encoded_bytes = b64encode(text_bytes)
return encoded_bytes.decode('utf-8')
except Exception as e:
print(f"Base64编码错误: {str(e)}")
return ""
def d64(self,encoded_text):
try:
encoded_bytes = encoded_text.encode('utf-8')
decoded_bytes = b64decode(encoded_bytes)
return decoded_bytes.decode('utf-8')
except Exception as e:
print(f"Base64解码错误: {str(e)}")
return ""