Update routes

Main route: v20250723
南风 route: v20250724
潇洒 route: v07.24.3
Liu
2025-07-24 14:00:50 +08:00
parent 4b7f02622a
commit 993f56c921
34 changed files with 15935 additions and 2860 deletions


@@ -215,7 +215,8 @@
"key": "csp_Lkdy",
"name": "🏔️┃来看┃影视",
"type": 3,
"api": "csp_Lkdy"
"api": "csp_Lkdy",
"ext": "https://lkvod.com"
},
{
"key": "csp_Tvyb",
@@ -285,12 +286,51 @@
"filterable": 1,
"ext": "https://www.czzymovie.com"
},
{
"key": "猎手影视",
"name": "🐆┃猎手┃影视",
"type": 3,
"api": "./api/LSYS.py",
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
},
{
"key": "零度",
"name": "🥶┃零度┃影视",
"type": 3,
"api": "./api/lingdu.py"
},
{
"key": "csp_AppXY",
"name": "🎀️┃星牙┃短剧",
"type": 3,
"api": "csp_AppXY"
},
{
"key": "河马短剧",
"name": "🦛┃河马┃短剧",
"type": 3,
"api": "./api/HMDJ.py",
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
},
{
"key": "偷乐短剧",
"name": "☺️┃偷乐┃短剧",
"type": 3,
"api": "./api/TLDJ.py",
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
},
{
"key": "csp_SP360",
"name": "📺┃360┃官源",
@@ -320,6 +360,27 @@
"api": "csp_Dm84",
"ext": "https://dm84.net"
},
{
"key": "56动漫",
"name": "5⃣┃56┃动漫",
"type": 3,
"api": "./api/drpy2.min.js",
"ext": "./js/56DM.js"
},
{
"key": "NT动漫",
"name": "🧬┃NT┃动漫",
"type": 3,
"api": "./api/drpy2.min.js",
"ext": "./js/NTDM.js"
},
{
"key": "Anime1",
"name": "🌏┃Anime┃动漫",
"type": 3,
"api": "./api/drpy2.min.js",
"ext": "./js/Anime1.js"
},
{
"key": "csp_FourK",
"name": "🌋┃绝对┃影视",

肥猫/api/HMDJ.py (new file, 380 lines)

@@ -0,0 +1,380 @@
# -*- coding: utf-8 -*-
import requests
import re
import json
import traceback
import sys
from urllib.parse import quote
sys.path.append('../../')
try:
from base.spider import Spider
except ImportError:
# Fallback base class so the spider can be tested locally
class Spider:
def init(self, extend=""):
pass
class Spider(Spider):
def __init__(self):
self.siteUrl = "https://www.kuaikaw.cn"
self.cateManual = {
"甜宠": "462",
"古装仙侠": "1102",
"现代言情": "1145",
"青春": "1170",
"豪门恩怨": "585",
"逆袭": "417-464",
"重生": "439-465",
"系统": "1159",
"总裁": "1147",
"职场商战": "943"
}
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0",
"Referer": self.siteUrl,
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8"
}
def getName(self):
return "河马短剧"
def init(self, extend=""):
return
def fetch(self, url, headers=None, retry=2):
"""统一的网络请求接口"""
if headers is None:
headers = self.headers
for i in range(retry + 1):
try:
response = requests.get(url, headers=headers, timeout=10, allow_redirects=True)
response.raise_for_status()
return response
except Exception as e:
if i == retry:
print(f"请求异常: {url}, 错误: {str(e)}")
return None
continue
def isVideoFormat(self, url):
video_formats = ['.mp4', '.mkv', '.avi', '.wmv', '.m3u8', '.flv', '.rmvb']
return any(format in url.lower() for format in video_formats)
def manualVideoCheck(self):
return False
def homeContent(self, filter):
result = {}
classes = [{'type_name': k, 'type_id': v} for k, v in self.cateManual.items()]
result['class'] = classes
try:
result['list'] = self.homeVideoContent()['list']
except:
result['list'] = []
return result
def homeVideoContent(self):
videos = []
try:
response = self.fetch(self.siteUrl)
if not response:
return {'list': []}
html_content = response.text
next_data_pattern = r'<script id="__NEXT_DATA__" type="application/json">(.*?)</script>'
next_data_match = re.search(next_data_pattern, html_content, re.DOTALL)
if not next_data_match:
return {'list': []}
next_data_json = json.loads(next_data_match.group(1))
page_props = next_data_json.get("props", {}).get("pageProps", {})
# Carousel (banner) entries
if "bannerList" in page_props:
for banner in page_props["bannerList"]:
if banner.get("bookId"):
videos.append({
"vod_id": f"/drama/{banner['bookId']}",
"vod_name": banner.get("bookName", ""),
"vod_pic": banner.get("coverWap", ""),
"vod_remarks": f"{banner.get('statusDesc', '')} {banner.get('totalChapterNum', '')}".strip()
})
# SEO category recommendations
if "seoColumnVos" in page_props:
for column in page_props["seoColumnVos"]:
for book in column.get("bookInfos", []):
if book.get("bookId"):
videos.append({
"vod_id": f"/drama/{book['bookId']}",
"vod_name": book.get("bookName", ""),
"vod_pic": book.get("coverWap", ""),
"vod_remarks": f"{book.get('statusDesc', '')} {book.get('totalChapterNum', '')}".strip()
})
# Deduplicate by (id, name)
seen = set()
unique_videos = []
for video in videos:
key = (video["vod_id"], video["vod_name"])
if key not in seen:
seen.add(key)
unique_videos.append(video)
except Exception as e:
print(f"获取首页推荐内容出错: {e}")
unique_videos = []
return {'list': unique_videos}
def categoryContent(self, tid, pg, filter, extend):
result = {'list': [], 'page': pg, 'pagecount': 1, 'limit': 20, 'total': 0}
url = f"{self.siteUrl}/browse/{tid}/{pg}"
response = self.fetch(url)
if not response:
return result
html_content = response.text
next_data_match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(.*?)</script>', html_content, re.DOTALL)
if not next_data_match:
return result
try:
next_data_json = json.loads(next_data_match.group(1))
page_props = next_data_json.get("props", {}).get("pageProps", {})
current_page = page_props.get("page", 1)
total_pages = page_props.get("pages", 1)
book_list = page_props.get("bookList", [])
videos = []
for book in book_list:
if book.get("bookId"):
videos.append({
"vod_id": f"/drama/{book['bookId']}",
"vod_name": book.get("bookName", ""),
"vod_pic": book.get("coverWap", ""),
"vod_remarks": f"{book.get('statusDesc', '')} {book.get('totalChapterNum', '')}".strip()
})
result.update({
'list': videos,
'page': int(current_page),
'pagecount': total_pages,
'limit': len(videos),
'total': len(videos) * total_pages if videos else 0
})
except Exception as e:
print(f"分类内容获取出错: {e}")
return result
def searchContent(self, key, quick, pg=1):
return self.searchContentPage(key, quick, pg)
def searchContentPage(self, key, quick, pg=1):
result = {'list': [], 'page': pg, 'pagecount': 1, 'limit': 20, 'total': 0}
search_url = f"{self.siteUrl}/search?searchValue={quote(key)}&page={pg}"
response = self.fetch(search_url)
if not response:
return result
html_content = response.text
next_data_match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(.*?)</script>', html_content, re.DOTALL)
if not next_data_match:
return result
try:
next_data_json = json.loads(next_data_match.group(1))
page_props = next_data_json.get("props", {}).get("pageProps", {})
total_pages = page_props.get("pages", 1)
book_list = page_props.get("bookList", [])
videos = []
for book in book_list:
if book.get("bookId"):
videos.append({
"vod_id": f"/drama/{book['bookId']}",
"vod_name": book.get("bookName", ""),
"vod_pic": book.get("coverWap", ""),
"vod_remarks": f"{book.get('statusDesc', '')} {book.get('totalChapterNum', '')}".strip()
})
result.update({
'list': videos,
'pagecount': total_pages,
'total': len(videos) * total_pages if videos else 0
})
except Exception as e:
print(f"搜索内容出错: {e}")
return result
def detailContent(self, ids):
result = {'list': []}
if not ids:
return result
vod_id = ids[0]
if not vod_id.startswith('/drama/'):
vod_id = f'/drama/{vod_id}'
drama_url = f"{self.siteUrl}{vod_id}"
response = self.fetch(drama_url)
if not response:
return result
html = response.text
next_data_match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(.*?)</script>', html, re.DOTALL)
if not next_data_match:
return result
try:
next_data = json.loads(next_data_match.group(1))
page_props = next_data.get("props", {}).get("pageProps", {})
book_info = page_props.get("bookInfoVo", {})
chapter_list = page_props.get("chapterList", [])
if not book_info.get("bookId"):
return result
# Basic metadata
categories = [c.get("name", "") for c in book_info.get("categoryList", [])]
performers = [p.get("name", "") for p in book_info.get("performerList", [])]
vod = {
"vod_id": vod_id,
"vod_name": book_info.get("title", ""),
"vod_pic": book_info.get("coverWap", ""),
"type_name": ",".join(categories),
"vod_year": "",
"vod_area": book_info.get("countryName", ""),
"vod_remarks": f"{book_info.get('statusDesc', '')} {book_info.get('totalChapterNum', '')}".strip(),
"vod_actor": ", ".join(performers),
"vod_director": "",
"vod_content": book_info.get("introduction", "")
}
# Build the episode list
play_urls = self.processEpisodes(vod_id, chapter_list)
if play_urls:
vod['vod_play_from'] = '河马剧场'
vod['vod_play_url'] = '$$$'.join(play_urls)
result['list'] = [vod]
except Exception as e:
print(f"详情页解析出错: {e}")
traceback.print_exc()
return result
def processEpisodes(self, vod_id, chapter_list):
play_urls = []
episodes = []
for chapter in chapter_list:
chapter_id = chapter.get("chapterId", "")
chapter_name = chapter.get("chapterName", "")
if not chapter_id or not chapter_name:
continue
# Try a direct video link first
video_url = self.getDirectVideoUrl(chapter)
if video_url:
episodes.append(f"{chapter_name}${video_url}")
continue
# Fallback: defer resolution to playerContent
episodes.append(f"{chapter_name}${vod_id}${chapter_id}${chapter_name}")
if episodes:
play_urls.append("#".join(episodes))
return play_urls
def getDirectVideoUrl(self, chapter):
if "chapterVideoVo" not in chapter or not chapter["chapterVideoVo"]:
return None
video_info = chapter["chapterVideoVo"]
for key in ["mp4", "mp4720p", "vodMp4Url"]:
if key in video_info and video_info[key] and ".mp4" in video_info[key].lower():
return video_info[key]
return None
def playerContent(self, flag, id, vipFlags):
result = {
"parse": 0,
"url": id,
"header": json.dumps(self.headers)
}
# Already a direct video link; return as-is
if 'http' in id and ('.mp4' in id or '.m3u8' in id):
return result
# Parse the composite id
parts = id.split('$')
if len(parts) < 2:
return result
drama_id = parts[0].replace('/drama/', '')
chapter_id = parts[1]
# Try to resolve the episode video URL
video_url = self.getEpisodeVideoUrl(drama_id, chapter_id)
if video_url:
result["url"] = video_url
return result
def getEpisodeVideoUrl(self, drama_id, chapter_id):
episode_url = f"{self.siteUrl}/episode/{drama_id}/{chapter_id}"
response = self.fetch(episode_url)
if not response:
return None
html = response.text
# Method 1: extract from __NEXT_DATA__
next_data_match = re.search(r'<script id="__NEXT_DATA__".*?>(.*?)</script>', html, re.DOTALL)
if next_data_match:
try:
next_data = json.loads(next_data_match.group(1))
page_props = next_data.get("props", {}).get("pageProps", {})
chapter_info = page_props.get("chapterInfo", {})
if chapter_info and "chapterVideoVo" in chapter_info:
video_info = chapter_info["chapterVideoVo"]
for key in ["mp4", "mp4720p", "vodMp4Url"]:
if key in video_info and video_info[key] and ".mp4" in video_info[key].lower():
return video_info[key]
except:
pass
# Method 2: scrape .mp4 URLs from the raw HTML
mp4_matches = re.findall(r'(https?://[^"\']+\.mp4)', html)
if mp4_matches:
for url in mp4_matches:
if chapter_id in url or drama_id in url:
return url
return mp4_matches[0]
return None
def localProxy(self, param):
return [200, "video/MP2T", {}, param]
def destroy(self):
pass
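A minimal smoke test for the spider above (a hypothetical harness, not part of the committed file; it assumes base.spider is importable or the local fallback kicks in, and that www.kuaikaw.cn is reachable):

if __name__ == '__main__':
    spider = Spider()
    spider.init()
    home = spider.homeContent(False)
    print('categories:', [c['type_name'] for c in home['class']])
    found = spider.searchContent('重生', False, 1)
    for vod in found['list'][:3]:
        print(vod['vod_id'], vod['vod_name'], vod['vod_remarks'])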

肥猫/api/LSYS.py (new file, 279 lines)

@@ -0,0 +1,279 @@
# coding=utf-8
# !/usr/bin/python
# by嗷呜(finally)
import sys
import os
sys.path.append("..")
import re
import hashlib
import hmac
import random
import string
from Crypto.Util.Padding import unpad
from concurrent.futures import ThreadPoolExecutor
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5, AES
from base64 import b64encode, b64decode
import json
import time
from base.spider import Spider
class Spider(Spider):
def getName(self):
return "电影猎手"
def init(self, extend=""):
self.device = self.device_id()
self.host = self.gethost()
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def action(self, action):
pass
def destroy(self):
pass
t = str(int(time.time()))
def homeContent(self, filter):
result = {}
filters = {}
classes = []
bba = self.url()
data = self.fetch(f"{self.host}/api/v1/app/config?pack={bba[0]}&signature={bba[1]}", headers=self.header()).text
data1 = self.aes(data)
dy = {"class":"类型","area":"地区","lang":"语言","year":"年份","letter":"字母","by":"排序","sort":"排序"}
data1['data']['movie_screen']['sort'].pop(0)
for item in data1['data']['movie_screen']['sort']:
item['n'] = item.pop('name')
item['v'] = item.pop('value')
for item in data1['data']['movie_screen']['filter']:
has_non_empty_field = False
classes.append({"type_name": item["name"], "type_id": str(item["id"])})
for key in dy:
if key in item and item[key]:
has_non_empty_field = True
break
if has_non_empty_field:
filters[str(item["id"])] = []
filters[str(item["id"])].append(
{"key": 'sort', "name": '排序', "value": data1['data']['movie_screen']['sort']})
for dkey in item:
if dkey in dy and item[dkey]:
item[dkey].pop(0)
value_array = [
{"n": value.strip(), "v": value.strip()}
for value in item[dkey]
if value.strip() != ""
]
filters[str(item["id"])].append(
{"key": dkey, "name": dy[dkey], "value": value_array}
)
result["class"] = classes
result["filters"] = filters
return result
def homeVideoContent(self):
bba = self.url()
url = f'{self.host}/api/v1/movie/index_recommend?pack={bba[0]}&signature={bba[1]}'
data = self.fetch(url, headers=self.header()).json()
videos = []
for item in data['data']:
if len(item['list']) > 0:
for it in item['list']:
try:
videos.append(self.voides(it))
except Exception as e:
continue
result = {"list": videos}
return result
def categoryContent(self, tid, pg, filter, extend):
body = {"type_id": tid, "sort": extend.get("sort", "by_default"), "class": extend.get("class", "类型"),
"area": extend.get("area", "地区"), "year": extend.get("year", "年份"), "page": str(pg),
"pageSize": "21"}
result = {}
list = []
bba = self.url(body)
url = f"{self.host}/api/v1/movie/screen/list?pack={bba[0]}&signature={bba[1]}"
data = self.fetch(url, headers=self.header()).json()['data']['list']
for item in data:
list.append(self.voides(item))
result["list"] = list
result["page"] = pg
result["pagecount"] = 9999
result["limit"] = 90
result["total"] = 999999
return result
def detailContent(self, ids):
body = {"id": ids[0]}
bba = self.url(body)
url = f'{self.host}/api/v1/movie/detail?pack={bba[0]}&signature={bba[1]}'
data = self.fetch(url, headers=self.header()).json()['data']
video = {'vod_name': data.get('name'),'type_name': data.get('type_name'),'vod_year': data.get('year'),'vod_area': data.get('area'),'vod_remarks': data.get('dynami'),'vod_content': data.get('content')}
play = []
names = []
tasks = []
for itt in data["play_from"]:
name = itt["name"]
a = []
if len(itt["list"]) > 0:
names.append(name)
play.append(self.playeach(itt['list']))
else:
tasks.append({"movie_id": ids[0], "from_code": itt["code"]})
names.append(name)
if tasks:
with ThreadPoolExecutor(max_workers=len(tasks)) as executor:
results = executor.map(self.playlist, tasks)
for result in results:
if result:
play.append(result)
else:
play.append("")
video["vod_play_from"] = "$$$".join(names)
video["vod_play_url"] = "$$$".join(play)
result = {"list": [video]}
return result
def searchContent(self, key, quick, pg=1):
body = {"keyword": key, "sort": "", "type_id": "0", "page": str(pg), "pageSize": "10",
"res_type": "by_movie_name"}
bba = self.url(body)
url = f"{self.host}/api/v1/movie/search?pack={bba[0]}&signature={bba[1]}"
data = self.fetch(url, headers=self.header()).json()['data'].get('list')
videos = []
for it in data:
try:
videos.append(self.voides(it))
except Exception as e:
continue
result = {"list": videos, "page": pg}
return result
def playerContent(self, flag, id, vipFlags):
url = id
if "m3u8" not in url and "mp4" not in url:
try:
add = id.split('|||')
data = {"from_code": add[0], "play_url": add[1], "episode_id": add[2], "type": "play"}
bba = self.url(data)
data2 = self.fetch(f"{self.host}/api/v1/movie_addr/parse_url?pack={bba[0]}&signature={bba[1]}",
headers=self.header()).json()['data']
url = data2.get('play_url') or data2.get('download_url')
try:
url1 = self.fetch(url, headers=self.header(), allow_redirects=False).headers['Location']
if url1 and "http" in url1:
url = url1
except:
pass
except Exception as e:
pass
if '.jpg' in url or '.jpeg' in url or '.png' in url:
url = self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"
result = {}
result["parse"] = 0
result["url"] = url
result["header"] = {'user-agent': 'okhttp/4.9.2'}
return result
def localProxy(self, param):
url = b64decode(param["url"]).decode('utf-8')
durl = url[:url.rfind('/')]
data = self.fetch(url, headers=self.header()).content.decode("utf-8")
lines = data.strip().split('\n')
for index, string in enumerate(lines):
# if 'URI="' in string and 'http' not in string:
#     lines[index] = index
# Reserved for future use; does not appear to be needed yet
if '#EXT' not in string and 'http' not in string:
lines[index] = durl + ('' if string.startswith('/') else '/') + string
data = '\n'.join(lines)
return [200, "application/vnd.apple.mpegur", data]
def device_id(self):
characters = string.ascii_lowercase + string.digits
random_string = ''.join(random.choices(characters, k=32))
return random_string
def gethost(self):
headers = {
'User-Agent': 'okhttp/4.9.2',
'Connection': 'Keep-Alive',
}
response = self.fetch('https://app-site.ecoliving168.com/domain_v5.json', headers=headers).json()
url = response['api_service'].replace('/api/', '')
return url
def header(self):
headers = {
'User-Agent': 'Android',
'Accept': 'application/prs.55App.v2+json',
'timestamp': self.t,
'x-client-setting': '{"pure-mode":1}',
'x-client-uuid': '{"device_id":' + self.device + '}, "type":1,"brand":"Redmi", "model":"M2012K10C", "system_version":30, "sdk_version":"3.1.0.7"}',
'x-client-version': '3096 '
}
return headers
def url(self, id=None):
if not id:
id = {}
id["timestamp"] = self.t
public_key = 'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA02F/kPg5A2NX4qZ5JSns+bjhVMCC6JbTiTKpbgNgiXU+Kkorg6Dj76gS68gB8llhbUKCXjIdygnHPrxVHWfzmzisq9P9awmXBkCk74Skglx2LKHa/mNz9ivg6YzQ5pQFUEWS0DfomGBXVtqvBlOXMCRxp69oWaMsnfjnBV+0J7vHbXzUIkqBLdXSNfM9Ag5qdRDrJC3CqB65EJ3ARWVzZTTcXSdMW9i3qzEZPawPNPe5yPYbMZIoXLcrqvEZnRK1oak67/ihf7iwPJqdc+68ZYEmmdqwunOvRdjq89fQMVelmqcRD9RYe08v+xDxG9Co9z7hcXGTsUquMxkh29uNawIDAQAB'
encrypted_text = json.dumps(id)
public_key = RSA.import_key(b64decode(public_key))
cipher = PKCS1_v1_5.new(public_key)
encrypted_message = cipher.encrypt(encrypted_text.encode('utf-8'))
encrypted_message_base64 = b64encode(encrypted_message).decode('utf-8')
result = encrypted_message_base64.replace('+', '-').replace('/', '_').replace('=', '')
key = '635a580fcb5dc6e60caa39c31a7bde48'
sign = hmac.new(key.encode(), result.encode(), hashlib.md5).hexdigest()
return result, sign
def playlist(self, body):
try:
bba = self.url(body)
url = f'{self.host}/api/v1/movie_addr/list?pack={bba[0]}&signature={bba[1]}'
data = self.fetch(url, headers=self.header()).json()['data']
return self.playeach(data)
except Exception:
return []
def playeach(self,data):
play_urls = []
for it in data:
if re.search(r"mp4|m3u8", it["play_url"]):
play_urls.append(f"{it['episode_name']}${it['play_url']}")
else:
play_urls.append(
f"{it['episode_name']}${it['from_code']}|||{it['play_url']}|||{it['episode_id']}"
)
return '#'.join(play_urls)
def voides(self, item):
if item['name'] or item['title']:
voide = {
"vod_id": item.get('id') or item.get('click'),
'vod_name': item.get('name') or item.get('title'),
'vod_pic': item.get('cover') or item.get('image'),
'vod_year': item.get('year') or item.get('label'),
'vod_remarks': item.get('dynamic') or item.get('sub_title')
}
return voide
def aes(self, text):
text = text.replace('-', '+').replace('_', '/') + '=='
key = b"e6d5de5fcc51f53d"
iv = b"2f13eef7dfc6c613"
cipher = AES.new(key, AES.MODE_CBC, iv)
pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size).decode("utf-8")
return json.loads(pt)
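For reference, aes() above reverses the API's transport encoding: AES-128-CBC with the hard-coded key/iv, wrapped in URL-safe base64 with padding stripped. An offline round-trip sketch of that scheme (pycryptodome required; the payload is made up):

import json
from base64 import b64encode, b64decode
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad

key, iv = b"e6d5de5fcc51f53d", b"2f13eef7dfc6c613"
plain = json.dumps({"data": {"ok": 1}}).encode("utf-8")
ct = AES.new(key, AES.MODE_CBC, iv).encrypt(pad(plain, AES.block_size))
token = b64encode(ct).decode().replace('+', '-').replace('/', '_').rstrip('=')
# Decode exactly the way aes() does:
raw = b64decode(token.replace('-', '+').replace('_', '/') + '==')
print(json.loads(unpad(AES.new(key, AES.MODE_CBC, iv).decrypt(raw), AES.block_size)))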

肥猫/api/TLDJ.py (new file, 790 lines)

@@ -0,0 +1,790 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Spider for the 偷乐短剧 site (toule.top)
import sys
import json
import re
import time
import urllib.parse
import requests
from bs4 import BeautifulSoup
# Import the base spider class
sys.path.append('../../')
try:
from base.spider import Spider
except ImportError:
# Fallback implementation for local debugging
class Spider:
def init(self, extend=""):
pass
class Spider(Spider):
def __init__(self):
# Site root URL
self.siteUrl = "https://www.toule.top"
# Category pages follow: /index.php/vod/show/class/<urlencoded name>/id/1.html
# Category map extracted from the site
self.cateManual = {
"男频": "/index.php/vod/show/class/%E7%94%B7%E9%A2%91/id/1.html",
"女频": "/index.php/vod/show/class/%E5%A5%B3%E9%A2%91/id/1.html",
"都市": "/index.php/vod/show/class/%E9%83%BD%E5%B8%82/id/1.html",
"赘婿": "/index.php/vod/show/class/%E8%B5%98%E5%A9%BF/id/1.html",
"战神": "/index.php/vod/show/class/%E6%88%98%E7%A5%9E/id/1.html",
"古代言情": "/index.php/vod/show/class/%E5%8F%A4%E4%BB%A3%E8%A8%80%E6%83%85/id/1.html",
"现代言情": "/index.php/vod/show/class/%E7%8E%B0%E4%BB%A3%E8%A8%80%E6%83%85/id/1.html",
"历史": "/index.php/vod/show/class/%E5%8E%86%E5%8F%B2/id/1.html",
"玄幻": "/index.php/vod/show/class/%E7%8E%84%E5%B9%BB/id/1.html",
"搞笑": "/index.php/vod/show/class/%E6%90%9E%E7%AC%91/id/1.html",
"甜宠": "/index.php/vod/show/class/%E7%94%9C%E5%AE%A0/id/1.html",
"励志": "/index.php/vod/show/class/%E5%8A%B1%E5%BF%97/id/1.html",
"逆袭": "/index.php/vod/show/class/%E9%80%86%E8%A2%AD/id/1.html",
"穿越": "/index.php/vod/show/class/%E7%A9%BF%E8%B6%8A/id/1.html",
"古装": "/index.php/vod/show/class/%E5%8F%A4%E8%A3%85/id/1.html"
}
# Request headers
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
"Referer": "https://www.toule.top/",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
}
# In-memory cache
self.cache = {}
self.cache_timeout = {}
def getName(self):
return "偷乐短剧"
def init(self, extend=""):
# Initialization hook; nothing to do here
return
def isVideoFormat(self, url):
"""判断是否为视频格式"""
video_formats = ['.mp4', '.m3u8', '.ts', '.flv', '.avi', '.mkv', '.mov', '.rmvb', '.3gp']
for format in video_formats:
if format in url.lower():
return True
return False
def manualVideoCheck(self):
"""是否需要手动检查视频"""
return False
# Utility: HTTP requests
def fetch(self, url, headers=None, data=None, method="GET"):
"""统一的网络请求方法"""
try:
if headers is None:
headers = self.headers.copy()
if method.upper() == "GET":
response = requests.get(url, headers=headers, params=data, timeout=10,verify=False)
else: # POST
response = requests.post(url, headers=headers, data=data, timeout=10,verify=False)
response.raise_for_status()
response.encoding = response.apparent_encoding or 'utf-8'
return response
except Exception as e:
self.log(f"请求失败: {url}, 错误: {str(e)}", "ERROR")
return None
# Cache helpers
def getCache(self, key, timeout=3600):
"""获取缓存数据"""
if key in self.cache and key in self.cache_timeout:
if time.time() < self.cache_timeout[key]:
return self.cache[key]
else:
del self.cache[key]
del self.cache_timeout[key]
return None
def setCache(self, key, value, timeout=3600):
"""设置缓存数据"""
self.cache[key] = value
self.cache_timeout[key] = time.time() + timeout
# Logging helper
def log(self, msg, level='INFO'):
"""记录日志"""
levels = {
'DEBUG': 0,
'INFO': 1,
'WARNING': 2,
'ERROR': 3
}
current_level = 'INFO'  # set to DEBUG for more verbose output
if levels.get(level, 4) >= levels.get(current_level, 1):
print(f"[{level}] {time.strftime('%Y-%m-%d %H:%M:%S')} - {msg}")
# Helper: extract the video id from a play URL
def extractVodId(self, url):
"""从URL中提取视频ID"""
# 路径格式: /index.php/vod/play/id/9024/sid/1/nid/1.html
match = re.search(r'/id/(\d+)/', url)
if match:
return match.group(1)
return ""
# Helper: extract category tags from page text
def extractCategories(self, text):
"""从网页内容中提取分类标签"""
cats = []
# Tag strings look like "男频,逆袭,亲情,短剧"
if "," in text:
parts = text.split(",")
for part in parts:
part = part.strip()
if part and part != "短剧":
cats.append(part)
return cats
# Main interface implementation
def homeContent(self, filter):
"""获取首页分类及内容"""
result = {}
classes = []
# Serve from cache when possible
cache_key = 'home_classes'
cached_classes = self.getCache(cache_key)
if cached_classes:
classes = cached_classes
else:
# Use the predefined category map
for k, v in self.cateManual.items():
classes.append({
'type_id': v,  # the full URL path doubles as the type_id
'type_name': k
})
# Cache for 24 hours
self.setCache(cache_key, classes, 24*3600)
result['class'] = classes
# Home page recommendations
videos = self.homeVideoContent().get('list', [])
result['list'] = videos
return result
def homeVideoContent(self):
"""获取首页推荐视频内容"""
result = {'list': []}
videos = []
# Serve from cache when possible
cache_key = 'home_videos'
cached_videos = self.getCache(cache_key)
if cached_videos:
return {'list': cached_videos}
try:
response = self.fetch(self.siteUrl)
if response and response.status_code == 200:
html = response.text
soup = BeautifulSoup(html, 'html.parser')
# Locate the "最新更新" (latest updates) section
latest_section = soup.find('h2', text=lambda t: t and '最新更新' in t)
if latest_section:
container = latest_section.parent  # its enclosing container
if container:
# All li.item entries
items = container.find_all('li', class_='item')
for item in items:
try:
# Link and title
title_link = item.find('h3')
if not title_link:
continue
title = title_link.text.strip()
# First link is the detail page
link_tag = item.find('a')
if not link_tag:
continue
link = link_tag.get('href', '')
if not link.startswith('http'):
link = urllib.parse.urljoin(self.siteUrl, link)
# Extract the id
vid = self.extractVodId(link)
if not vid:
continue
# Cover image
img_tag = item.find('img')
img_url = ""
if img_tag:
img_url = img_tag.get('src', img_tag.get('data-src', ''))
if img_url and not img_url.startswith('http'):
img_url = urllib.parse.urljoin(self.siteUrl, img_url)
# Remarks
remarks = ""
remarks_tag = item.find('span', class_='remarks')
if remarks_tag:
remarks = remarks_tag.text.strip()
# Tags
tags = ""
tags_tag = item.find('span', class_='tags')
if tags_tag:
tags = tags_tag.text.strip()
# Merge remarks and tags
if remarks and tags:
remarks = f"{remarks} | {tags}"
elif tags:
remarks = tags
# Build the video item
videos.append({
'vod_id': vid,
'vod_name': title,
'vod_pic': img_url,
'vod_remarks': remarks
})
except Exception as e:
self.log(f"处理视频项时出错: {str(e)}", "ERROR")
continue
# Cache for 1 hour
self.setCache(cache_key, videos, 3600)
except Exception as e:
self.log(f"获取首页视频内容发生错误: {str(e)}", "ERROR")
result['list'] = videos
return result
def categoryContent(self, tid, pg, filter, extend):
"""获取分类内容"""
result = {}
videos = []
# Normalize the page number
if pg is None:
pg = 1
else:
pg = int(pg)
# Build the category URL; tid is a full URL path
if tid.startswith("/"):
# Inject the page number; URLs look like /index.php/vod/show/class/男频/id/1.html
if pg > 1:
if "html" in tid:
category_url = tid.replace(".html", f"/page/{pg}.html")
else:
category_url = f"{tid}/page/{pg}.html"
else:
category_url = tid
full_url = urllib.parse.urljoin(self.siteUrl, category_url)
else:
# tid is not a URL path; it may be a legacy category id, so look up its URL
category_url = ""
for name, url in self.cateManual.items():
if name == tid:
category_url = url
break
if not category_url:
self.log(f"未找到分类ID对应的URL: {tid}", "ERROR")
result['list'] = []
result['page'] = pg
result['pagecount'] = 1
result['limit'] = 0
result['total'] = 0
return result
# Inject the page number
if pg > 1:
if "html" in category_url:
category_url = category_url.replace(".html", f"/page/{pg}.html")
else:
category_url = f"{category_url}/page/{pg}.html"
full_url = urllib.parse.urljoin(self.siteUrl, category_url)
# Fetch the category page
try:
response = self.fetch(full_url)
if response and response.status_code == 200:
html = response.text
soup = BeautifulSoup(html, 'html.parser')
# Video items (selector follows the site's actual HTML)
items = soup.find_all('li', class_='item')
for item in items:
try:
# Link and title
title_tag = item.find('h3')
if not title_tag:
continue
title = title_tag.text.strip()
# Link
link_tag = item.find('a')
if not link_tag:
continue
link = link_tag.get('href', '')
if not link.startswith('http'):
link = urllib.parse.urljoin(self.siteUrl, link)
# Extract the id
vid = self.extractVodId(link)
if not vid:
continue
# Cover image
img_tag = item.find('img')
img_url = ""
if img_tag:
img_url = img_tag.get('src', img_tag.get('data-src', ''))
if img_url and not img_url.startswith('http'):
img_url = urllib.parse.urljoin(self.siteUrl, img_url)
# Remarks
remarks = ""
remarks_tag = item.find('span', class_='remarks')
if remarks_tag:
remarks = remarks_tag.text.strip()
# Tags
tags = ""
tags_tag = item.find('span', class_='tags')
if tags_tag:
tags = tags_tag.text.strip()
# Merge remarks and tags
if remarks and tags:
remarks = f"{remarks} | {tags}"
elif tags:
remarks = tags
# Build the video item
videos.append({
'vod_id': vid,
'vod_name': title,
'vod_pic': img_url,
'vod_remarks': remarks
})
except Exception as e:
self.log(f"处理分类视频项时出错: {str(e)}", "ERROR")
continue
# Pagination info
# Defaults
total = len(videos)
pagecount = 1
limit = 20
# Look for the pagination element
pagination = soup.find('ul', class_='page')
if pagination:
# The largest numbered link is the page count
last_page_links = pagination.find_all('a')
for link in last_page_links:
page_text = link.text.strip()
if page_text.isdigit():
pagecount = max(pagecount, int(page_text))
except Exception as e:
self.log(f"获取分类内容发生错误: {str(e)}", "ERROR")
result['list'] = videos
result['page'] = pg
result['pagecount'] = pagecount
result['limit'] = limit
result['total'] = total
return result
def detailContent(self, ids):
"""获取详情内容"""
result = {}
if not ids or len(ids) == 0:
return result
# Video id
vid = ids[0]
# Build the play page URL
play_url = f"{self.siteUrl}/index.php/vod/play/id/{vid}/sid/1/nid/1.html"
try:
response = self.fetch(play_url)
if not response or response.status_code != 200:
return result
html = response.text
soup = BeautifulSoup(html, 'html.parser')
# Basic video metadata
# Title
title = ""
title_tag = soup.find('h1', class_='items-title')
if title_tag:
title = title_tag.text.strip()
# Cover image
pic = ""
pic_tag = soup.find('img', class_='thumb')
if pic_tag:
pic = pic_tag.get('src', '')
if pic and not pic.startswith('http'):
pic = urllib.parse.urljoin(self.siteUrl, pic)
# Synopsis
desc = ""
desc_tag = soup.find('div', class_='text-content')
if desc_tag:
desc = desc_tag.text.strip()
# Tags / categories
tags = []
tags_container = soup.find('span', class_='items-tags')
if tags_container:
tag_links = tags_container.find_all('a')
for tag in tag_links:
tag_text = tag.text.strip()
if tag_text:
tags.append(tag_text)
# Play list
play_from = "偷乐短剧"
play_list = []
# Locate the play-list area
play_area = soup.find('div', class_='swiper-wrapper')
if play_area:
# All episode links
episode_links = play_area.find_all('a')
for ep in episode_links:
ep_title = ep.text.strip()
ep_url = ep.get('href', '')
if ep_url:
# Use the URL itself as the id
if not ep_url.startswith('http'):
ep_url = urllib.parse.urljoin(self.siteUrl, ep_url)
# Episode number
ep_num = ep_title
if ep_num.isdigit():
ep_num = f"{ep_num}"
play_list.append(f"{ep_num}${ep_url}")
# No play list found; fall back to the play button
if not play_list:
play_btn = soup.find('a', class_='btn-play')
if play_btn:
play_url = play_btn.get('href', '')
if play_url:
if not play_url.startswith('http'):
play_url = urllib.parse.urljoin(self.siteUrl, play_url)
play_list.append(f"播放${play_url}")
# Still nothing; fall back to the play page URL
if not play_list:
play_url = f"{self.siteUrl}/index.php/vod/play/id/{vid}/sid/1/nid/1.html"
play_list.append(f"播放${play_url}")
# Additional metadata (director, cast, etc.)
director = ""
actor = ""
year = ""
area = ""
remarks = ""
# Meta rows
meta_items = soup.find_all('div', class_='meta-item')
for item in meta_items:
item_title = item.find('span', class_='item-title')
item_content = item.find('span', class_='item-content')
if item_title and item_content:
title_text = item_title.text.strip()
content_text = item_content.text.strip()
if "导演" in title_text:
director = content_text
elif "主演" in title_text:
actor = content_text
elif "年份" in title_text:
year = content_text
elif "地区" in title_text:
area = content_text
elif "简介" in title_text:
if not desc:
desc = content_text
elif "状态" in title_text:
remarks = content_text
# Remarks not found among the meta items
if not remarks:
remarks_tag = soup.find('span', class_='remarks')
if remarks_tag:
remarks = remarks_tag.text.strip()
# Assemble the standard structure
vod = {
"vod_id": vid,
"vod_name": title,
"vod_pic": pic,
"vod_year": year,
"vod_area": area,
"vod_remarks": remarks,
"vod_actor": actor,
"vod_director": director,
"vod_content": desc,
"type_name": ",".join(tags),
"vod_play_from": play_from,
"vod_play_url": "#".join(play_list)
}
result = {
'list': [vod]
}
except Exception as e:
self.log(f"获取详情内容时出错: {str(e)}", "ERROR")
return result
def searchContent(self, key, quick, pg=1):
"""搜索功能"""
result = {}
videos = []
# Build the search URL and params
search_url = f"{self.siteUrl}/index.php/vod/search.html"
params = {"wd": key}
try:
response = self.fetch(search_url, data=params)
if response and response.status_code == 200:
html = response.text
soup = BeautifulSoup(html, 'html.parser')
# Search result items
search_items = soup.find_all('li', class_='item')
for item in search_items:
try:
# Title
title_tag = item.find('h3')
if not title_tag:
continue
title = title_tag.text.strip()
# Link
link_tag = item.find('a')
if not link_tag:
continue
link = link_tag.get('href', '')
if not link.startswith('http'):
link = urllib.parse.urljoin(self.siteUrl, link)
# Extract the video id
vid = self.extractVodId(link)
if not vid:
continue
# Cover image
img_tag = item.find('img')
img_url = ""
if img_tag:
img_url = img_tag.get('src', img_tag.get('data-src', ''))
if img_url and not img_url.startswith('http'):
img_url = urllib.parse.urljoin(self.siteUrl, img_url)
# Remarks
remarks = ""
remarks_tag = item.find('span', class_='remarks')
if remarks_tag:
remarks = remarks_tag.text.strip()
# Tags
tags = ""
tags_tag = item.find('span', class_='tags')
if tags_tag:
tags = tags_tag.text.strip()
# Merge remarks and tags
if remarks and tags:
remarks = f"{remarks} | {tags}"
elif tags:
remarks = tags
# Build the video item
videos.append({
'vod_id': vid,
'vod_name': title,
'vod_pic': img_url,
'vod_remarks': remarks
})
except Exception as e:
self.log(f"处理搜索结果时出错: {str(e)}", "ERROR")
continue
except Exception as e:
self.log(f"搜索功能发生错误: {str(e)}", "ERROR")
result['list'] = videos
return result
def searchContentPage(self, key, quick, pg=1):
return self.searchContent(key, quick, pg)
def playerContent(self, flag, id, vipFlags):
"""获取播放内容"""
result = {}
try:
# Already a direct video URL?
if self.isVideoFormat(id):
result["parse"] = 0
result["url"] = id
result["playUrl"] = ""
result["header"] = json.dumps(self.headers)
return result
# A full page URL?
if id.startswith(('http://', 'https://')):
play_url = id
# Treat as a site-relative path
elif id.startswith('/'):
play_url = urllib.parse.urljoin(self.siteUrl, id)
# Otherwise assume a video id and build the play page URL
else:
# "videoId_episode" format?
parts = id.split('_')
if len(parts) > 1 and parts[0].isdigit():
vid = parts[0]
nid = parts[1]
play_url = f"{self.siteUrl}/index.php/vod/play/id/{vid}/sid/1/nid/{nid}.html"
else:
# Plain video id
play_url = f"{self.siteUrl}/index.php/vod/play/id/{id}/sid/1/nid/1.html"
# Fetch the play page and extract the real video URL
try:
self.log(f"正在解析播放页面: {play_url}")
response = self.fetch(play_url)
if response and response.status_code == 200:
html = response.text
# Look for the player_aaaa variable
player_match = re.search(r'var\s+player_aaaa\s*=\s*({.*?});', html, re.DOTALL)
if player_match:
try:
player_data = json.loads(player_match.group(1))
if 'url' in player_data:
video_url = player_data['url']
if not video_url.startswith('http'):
video_url = urllib.parse.urljoin(self.siteUrl, video_url)
self.log(f"从player_aaaa获取到视频地址: {video_url}")
result["parse"] = 0
result["url"] = video_url
result["playUrl"] = ""
result["header"] = json.dumps(self.headers)
return result
except json.JSONDecodeError as e:
self.log(f"解析player_aaaa JSON出错: {str(e)}", "ERROR")
# player_aaaa failed; try other approaches
# 1. <video> tag
video_match = re.search(r'<video[^>]*src=["\'](.*?)["\']', html)
if video_match:
video_url = video_match.group(1)
if not video_url.startswith('http'):
video_url = urllib.parse.urljoin(self.siteUrl, video_url)
self.log(f"从video标签找到视频地址: {video_url}")
result["parse"] = 0
result["url"] = video_url
result["playUrl"] = ""
result["header"] = json.dumps(self.headers)
return result
# 2. iframe
iframe_match = re.search(r'<iframe[^>]*src=["\'](.*?)["\']', html)
if iframe_match:
iframe_url = iframe_match.group(1)
if not iframe_url.startswith('http'):
iframe_url = urllib.parse.urljoin(self.siteUrl, iframe_url)
self.log(f"找到iframe正在解析: {iframe_url}")
# Fetch the iframe content
iframe_response = self.fetch(iframe_url)
if iframe_response and iframe_response.status_code == 200:
iframe_html = iframe_response.text
# Look for a video URL inside the iframe
iframe_video_match = re.search(r'(https?://[^\'"]+\.(mp4|m3u8|ts))', iframe_html)
if iframe_video_match:
video_url = iframe_video_match.group(1)
self.log(f"从iframe中找到视频地址: {video_url}")
result["parse"] = 0
result["url"] = video_url
result["playUrl"] = ""
result["header"] = json.dumps({
"User-Agent": self.headers["User-Agent"],
"Referer": iframe_url
})
return result
# 3. Any plausible video URL in the page
url_match = re.search(r'(https?://[^\'"]+\.(mp4|m3u8|ts))', html)
if url_match:
video_url = url_match.group(1)
self.log(f"找到可能的视频地址: {video_url}")
result["parse"] = 0
result["url"] = video_url
result["playUrl"] = ""
result["header"] = json.dumps(self.headers)
return result
except Exception as e:
self.log(f"解析播放地址时出错: {str(e)}", "ERROR")
# Everything failed; signal that external parsing is needed
self.log("No directly playable URL found; external parsing required", "WARNING")
result["parse"] = 1 # 表示需要外部解析
result["url"] = play_url # 返回播放页面URL
result["playUrl"] = ""
result["header"] = json.dumps(self.headers)
except Exception as e:
self.log(f"获取播放内容时出错: {str(e)}", "ERROR")
return result
def localProxy(self, param):
"""本地代理"""
return [404, "text/plain", {}, "Not Found"]

肥猫/api/jsencrypt.js (new file, 360 lines): diff suppressed because one or more lines are too long

肥猫/api/lingdu.py (new file, 220 lines)

@@ -0,0 +1,220 @@
# -*- coding: utf-8 -*-
# by @嗷呜
import json
import random
import sys
from base64 import b64encode, b64decode
from concurrent.futures import ThreadPoolExecutor
sys.path.append('..')
from base.spider import Spider
class Spider(Spider):
def init(self, extend=""):
did=self.getdid()
self.headers.update({'deviceId': did})
token=self.gettk()
self.headers.update({'token': token})
pass
def getName(self):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
def destroy(self):
pass
host='http://ldys.sq1005.top'
headers = {
'User-Agent': 'okhttp/4.12.0',
'client': 'app',
'deviceType': 'Android'
}
def homeContent(self, filter):
data=self.post(f"{self.host}/api/v1/app/screen/screenType", headers=self.headers).json()
result = {}
cate = {
"类型": "classify",
"地区": "region",
"年份": "year"
}
sort={
'key':'sreecnTypeEnum',
'name': '排序',
'value':[{'n':'最新','v':'NEWEST'},{'n':'人气','v':'POPULARITY'},{'n':'评分','v':'COLLECT'},{'n':'热搜','v':'HOT'}]
}
classes = []
filters = {}
for k in data['data']:
classes.append({
'type_name': k['name'],
'type_id': k['id']
})
filters[k['id']] = []
for v in k['children']:
filters[k['id']].append({
'name': v['name'],
'key': cate[v['name']],
'value':[{'n':i['name'],'v':i['name']} for i in v['children']]
})
filters[k['id']].append(sort)
result['class'] = classes
result['filters'] = filters
return result
def homeVideoContent(self):
jdata={"condition":64,"pageNum":1,"pageSize":40}
data=self.post(f"{self.host}/api/v1/app/recommend/recommendSubList", headers=self.headers, json=jdata).json()
return {'list':self.getlist(data['data']['records'])}
def categoryContent(self, tid, pg, filter, extend):
jdata = {
'condition': {
'sreecnTypeEnum': 'NEWEST',
'typeId': tid,
},
'pageNum': int(pg),
'pageSize': 40,
}
jdata['condition'].update(extend)
data = self.post(f"{self.host}/api/v1/app/screen/screenMovie", headers=self.headers, json=jdata).json()
result = {}
result['list'] = self.getlist(data['data']['records'])
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['total'] = 999999
return result
def detailContent(self, ids):
ids = ids[0].split('@@')
jdata = {"id": int(ids[0]), "typeId": ids[-1]}
v = self.post(f"{self.host}/api/v1/app/play/movieDesc", headers=self.headers, json=jdata).json()
v = v['data']
vod = {
'type_name': v.get('classify'),
'vod_year': v.get('year'),
'vod_area': v.get('area'),
'vod_actor': v.get('star'),
'vod_director': v.get('director'),
'vod_content': v.get('introduce'),
'vod_play_from': '',
'vod_play_url': ''
}
c = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
l = c['data']['moviePlayerList']
n = {str(i['id']): i['moviePlayerName'] for i in l}
m = jdata.copy()
m.update({'playerId': str(l[0]['id'])})
pd = self.getv(m, c['data']['episodeList'])
if len(l)-1:
with ThreadPoolExecutor(max_workers=len(l)-1) as executor:
future_to_player = {executor.submit(self.getd, jdata, player): player for player in l[1:]}
for future in future_to_player:
try:
o,p = future.result()
pd.update(self.getv(o,p))
except Exception as e:
print(f"请求失败: {e}")
w, e = [],[]
for i, x in pd.items():
if x:
w.append(n[i])
e.append(x)
vod['vod_play_from'] = '$$$'.join(w)
vod['vod_play_url'] = '$$$'.join(e)
return {'list': [vod]}
def searchContent(self, key, quick, pg="1"):
jdata={
"condition": {
"value": key
},
"pageNum": int(pg),
"pageSize": 40
}
data=self.post(f"{self.host}/api/v1/app/search/searchMovie", headers=self.headers, json=jdata).json()
return {'list':self.getlist(data['data']['records']),'page':pg}
def playerContent(self, flag, id, vipFlags):
jdata=json.loads(self.d64(id))
data = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=jdata).json()
try:
params={'playerUrl':data['data']['url'],'playerId':jdata['playerId']}
pd=self.fetch(f"{self.host}/api/v1/app/play/analysisMovieUrl", headers=self.headers, params=params).json()
url,p=pd['data'],0
except Exception as e:
print(f"请求失败: {e}")
url,p=data['data']['url'],0
return {'parse': p, 'url': url, 'header': {'User-Agent': 'okhttp/4.12.0'}}
def localProxy(self, param):
pass
def liveContent(self, url):
pass
def gettk(self):
data=self.fetch(f"{self.host}/api/v1/app/user/visitorInfo", headers=self.headers).json()
return data['data']['token']
def getdid(self):
did=self.getCache('ldid')
if not did:
hex_chars = '0123456789abcdef'
did =''.join(random.choice(hex_chars) for _ in range(16))
self.setCache('ldid',did)
return did
def getd(self,jdata,player):
x = jdata.copy()
x.update({'playerId': str(player['id'])})
response = self.post(f"{self.host}/api/v1/app/play/movieDetails", headers=self.headers, json=x).json()
return x, response['data']['episodeList']
def getv(self,d,c):
f={d['playerId']:''}
g=[]
for i in c:
j=d.copy()
j.update({'episodeId':str(i['id'])})
g.append(f"{i['episode']}${self.e64(json.dumps(j))}")
f[d['playerId']]='#'.join(g)
return f
def getlist(self,data):
videos = []
for i in data:
videos.append({
'vod_id': f"{i['id']}@@{i['typeId']}",
'vod_name': i.get('name'),
'vod_pic': i.get('cover'),
'vod_year': i.get('year'),
'vod_remarks': i.get('totalEpisode')
})
return videos
def e64(self, text):
try:
text_bytes = text.encode('utf-8')
encoded_bytes = b64encode(text_bytes)
return encoded_bytes.decode('utf-8')
except Exception as e:
print(f"Base64编码错误: {str(e)}")
return ""
def d64(self,encoded_text):
try:
encoded_bytes = encoded_text.encode('utf-8')
decoded_bytes = b64decode(encoded_bytes)
return decoded_bytes.decode('utf-8')
except Exception as e:
print(f"Base64解码错误: {str(e)}")
return ""

肥猫/js/56DM.js (new file, 1 line)

@@ -0,0 +1 @@
dmFyIHJ1bGUgPSB7CiAgICB0aXRsZTogJzU25Yqo5ryrJywKICAgIGhvc3Q6ICdodHRwczovL3d3dy41NmRtLmNjLycsCiAgICB1cmw6ICdodHRwczovL3d3dy41NmRtLmNjL3R5cGUvZnljbGFzcy1meXBhZ2UuaHRtbCcsCiAgICBzZWFyY2hVcmw6ICdodHRwczovL3d3dy41NmRtLmNjL3NlYXJjaC8qKi0tLS0tLS0tLS1meXBhZ2UtLS0uaHRtbCcsCiAgICBzZWFyY2hhYmxlOiAyLCAvL+aYr+WQpuWQr+eUqOWFqOWxgOaQnOe0oiwKICAgIHF1aWNrU2VhcmNoOiAwLCAvL+aYr+WQpuWQr+eUqOW/q+mAn+aQnOe0oiwKICAgIGZpbHRlcmFibGU6IDAsIC8v5piv5ZCm5ZCv55So5YiG57G7562b6YCJLAogICAgaGVhZGVyczogewogICAgICAgICdVc2VyLUFnZW50JzogJ1VDX1VBJywgLy8gIkNvb2tpZSI6ICIiCiAgICB9LCAvLyBjbGFzc19wYXJzZTonLnN0dWktaGVhZGVyX19tZW51IGxpOmd0KDApOmx0KDcpO2EmJlRleHQ7YSYmaHJlZjsvKFxcZCspLmh0bWwnLAogICAgY2xhc3NfcGFyc2U6ICcuc251aS1oZWFkZXItbWVudS1uYXYgbGk6Z3QoMCk6bHQoNik7YSYmVGV4dDthJiZocmVmOy4qLyguKj8pLmh0bWwnLAogICAgcGxheV9wYXJzZTogdHJ1ZSwKICAgIGxhenk6IGBqczoKICAgICAgICAgICAgaWYoL1xcLihtM3U4fG1wNCkvLnRlc3QoaW5wdXQpKXsKICAgICAgICAgICAgICAgIGlucHV0ID0ge3BhcnNlOjAsdXJsOmlucHV0fQogICAgICAgICAgICB9ZWxzZXsKICAgICAgICAgICAgICAgIGlmKHJ1bGUucGFyc2VfdXJsLnN0YXJ0c1dpdGgoJ2pzb246JykpewogICAgICAgICAgICAgICAgICAgIGxldCBwdXJsID0gcnVsZS5wYXJzZV91cmwucmVwbGFjZSgnanNvbjonLCcnKStpbnB1dDsKICAgICAgICAgICAgICAgICAgICBsZXQgaHRtbCA9IHJlcXVlc3QocHVybCk7CiAgICAgICAgICAgICAgICAgICAgaW5wdXQgPSB7cGFyc2U6MCx1cmw6SlNPTi5wYXJzZShodG1sKS51cmx9CiAgICAgICAgICAgICAgICB9ZWxzZXsKICAgICAgICAgICAgICAgICAgICBpbnB1dD0gcnVsZS5wYXJzZV91cmwraW5wdXQ7IAogICAgICAgICAgICAgICAgfQogICAgICAgICAgICB9CiAgICAgICAgICAgIGAsCiAgICBsaW1pdDogNiwKICAgIOaOqOiNkDogJy5jQ0JmX0ZBQUVmYmM7bGk7YSYmdGl0bGU7Lmxhenlsb2FkJiZkYXRhLW9yaWdpbmFsOy5kQURfQkJDSSYmVGV4dDthJiZocmVmJywKICAgIGRvdWJsZTogdHJ1ZSwgLy8g5o6o6I2Q5YaF5a655piv5ZCm5Y+M5bGC5a6a5L2NCiAgICDkuIDnuqc6ICcuY0NCZl9GQUFFZmJjIGxpO2EmJnRpdGxlO2EmJmRhdGEtb3JpZ2luYWw7LmRBRF9CQkNJJiZUZXh0O2EmJmhyZWYnLAogICAg5LqM57qnOiB7CiAgICAgICAgInRpdGxlIjogImgxJiZUZXh0IiwKICAgICAgICAiaW1nIjogIi5zdHVpLWNvbnRlbnRfX3RodW1iIC5sYXp5bG9hZCYmZGF0YS1vcmlnaW5hbCIsCiAgICAgICAgImRlc2MiOiAiLmNDQmZfREFCQ2NhY19faGNJZGVFIHA6ZXEoMCkmJlRleHQ7LmNDQmZfREFCQ2NhY19faGNJZGVFIHA6ZXEoMSkmJlRleHQ7LmNDQmZfREFCQ2NhY19faGNJZGVFIHA6ZXEoMikmJlRleHQ7LmNDQmZfREFCQ2NhY19faGNJZGVFIHA6ZXEoMykmJlRleHQ7LmNDQmZfREFCQ2NhY19faGNJZGVFIHA6ZXEoNCkmJlRleHQiLAogICAgICAgICJjb250ZW50IjogIi5kZXRhaWwmJlRleHQiLAogICAgICAgICJ0YWJzIjogIi5jaGFubmVsLXRhYiBsaSIsCiAgICAgICAgImxpc3RzIjogIi5wbGF5LWxpc3QtY29udGVudDplcSgjaWQpIGxpIgogICAgfSwKICAgIOaQnOe0ojogJy5jQ0JmX0ZBQUVmYmNfX2RiRDthJiZ0aXRsZTsubGF6eWxvYWQmJmRhdGEtb3JpZ2luYWw7LmRBRF9CQkNJJiZUZXh0O2EmJmhyZWY7LmNDQmZfRkFBRWZiY19faGNJZGVFJiZwOmVxKDApIHAmJlRleHQnLAp9
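The single line above is a base64-encoded drpy rule (it decodes to a "var rule = {...}" object, loaded through the config entry's "api": "./api/drpy2.min.js"). A quick way to inspect it locally, assuming the repo path below is where it is checked out:

from base64 import b64decode
with open('肥猫/js/56DM.js', 'rb') as fh:
    print(b64decode(fh.read()).decode('utf-8')[:300])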

肥猫/js/Anime1.js (new file, 1 line): diff suppressed because one or more lines are too long

肥猫/js/NTDM.js (new file, 1 line): diff suppressed because one or more lines are too long

File diff suppressed because it is too large