潇洒更新 202508060355

This commit is contained in:
github-actions[bot]
2025-08-06 03:55:51 +00:00
parent 64bfc124df
commit 888a1d9dd0
9 changed files with 3777 additions and 3293 deletions

View File

@@ -85,6 +85,51 @@
"key3": "aassddwwxxllsx1x"
}
},
{
"key": "乐达",
"name": "乐达APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "https://ledayy.com",
"site": "",
"dataKey": "hjjp68c2okw12345",
"dataIv": "hjjp68c2okw12345",
"deviceId": "",
"version": ""
}
},
{
"key": "灵虎",
"name": "灵虎APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "",
"site": "https://bind.315999.xyz/89.txt",
"dataKey": "#getapp@TMD@2025",
"dataIv": "#getapp@TMD@2025",
"deviceId": "",
"version": "120"
}
},
{
"key": "旗星",
"name": "旗星APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "http://ys.qist.top",
"site": "",
"dataKey": "2SWSPFxugBLPPOKo",
"dataIv": "2SWSPFxugBLPPOKo",
"deviceId": "",
"version": "120"
}
},
{
"key": "云云",
"name": "云云APP",
@@ -114,35 +159,6 @@
"version": ""
}
},
{
"key": "魔方",
"name": "魔方APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "",
"site": "https://www.snysw.xyz/mfys.txt",
"dataKey": "1234567887654321",
"dataIv": "1234567887654321",
"deviceId": "",
"version": ""
}
},
{
"key": "冬天",
"name": "冬天APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "http://154.37.152.97:5211",
"dataKey": "685bf108bc47b67d",
"dataIv": "685bf108bc47b67d",
"deviceId": "685bf108bc47b67d",
"version": ""
}
},
{
"key": "火猫",
"name": "火猫丨APP",
@@ -173,21 +189,6 @@
"version": "120"
}
},
{
"key": "海豚",
"name": "海豚丨APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet2",
"ext": {
"url": "https://qjappcms.htsp4k.top",
"site": "",
"dataKey": "R69yVluzg6yLpjp0",
"dataIv": "R69yVluzg6yLpjp0",
"deviceId": "",
"version": ""
}
},
{
"key": "哔滴",
"name": "哔滴丨APP",
@@ -217,21 +218,6 @@
"version": ""
}
},
{
"key": "小野",
"name": "小野APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "https://appcms.xy4k.com",
"site": "",
"dataKey": "7SDWjknU34zqFbVr",
"dataIv": "7SDWjknU34zqFbVr",
"deviceId": "",
"version": ""
}
},
{
"key": "爱看",
"name": "爱看丨APP",
@@ -304,20 +290,6 @@
"version": "120"
}
},
{
"key": "榴莲",
"name": "榴莲APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet2",
"ext": {
"url": "https://qjappcms.ll4k.xyz",
"dataKey": "1yGA85sJ5STtE7uj",
"dataIv": "1yGA85sJ5STtE7uj",
"deviceId": "",
"version": "50000"
}
},
{
"key": "仓鼠",
"name": "仓鼠APP",
@@ -391,20 +363,6 @@
"version": ""
}
},
{
"key": "溜溜",
"name": "溜溜APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "https://appcms.ll4k.xyz",
"dataKey": "NiDGaKiVnkO3QX1Q",
"dataIv": "NiDGaKiVnkO3QX1Q",
"deviceId": "2fbaf48ee97783260bc907e3ab0bd40c3",
"version": "200"
}
},
{
"key": "晴天",
"name": "晴天APP",
@@ -519,20 +477,6 @@
"version": ""
}
},
{
"key": "诺映",
"name": "诺映APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "https://www.noad.top",
"dataKey": "708FA298F0855840",
"dataIv": "708FA298F0855840",
"deviceId": "2129ec9e6e5703cb0aeeddd79554e38f8",
"version": "103"
}
},
{
"key": "移动",
"name": "移动APP",
@@ -865,16 +809,6 @@
"type": "list"
}
},
{
"key": "爱搜",
"name": "爱搜4K弹幕",
"type": 3,
"api": "./js/cloud.min.js",
"ext": "./js/爱搜.js",
"style": {
"type": "list"
}
},
{
"key": "糖果",
"name": "糖果|搜索",
@@ -889,16 +823,6 @@
"type": 3,
"api": "csp_MiSou"
},
{
"key": "人人分享站",
"name": "人人|搜索",
"type": 3,
"api": "./js/cloud.min.js",
"ext": "./js/人人分享站.js",
"style": {
"type": "list"
}
},
{
"key": "全盘",
"name": "全盘|搜索",
@@ -926,8 +850,7 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "偷乐短剧",
@@ -937,8 +860,7 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "爱看短剧",
@@ -948,8 +870,7 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "锦鲤短剧",
@@ -959,8 +880,17 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "剧王短剧",
"name": "剧王|短剧",
"type": 3,
"api": "./py/剧王短剧.py",
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1
},
{
"key": "短剧屋",
@@ -996,7 +926,6 @@
"searchable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2,
"ext": {
"site": "https://m.hkybqufgh.com,https://m.sizhengxt.com,https://m.9zhoukj.com,https://m.sizhengxt.com,https://m.jiabaide.cn"
}
@@ -1009,8 +938,7 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "零度影视",
@@ -1020,8 +948,7 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "追星影视",
@@ -1031,8 +958,7 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "厂长影视",
@@ -1085,8 +1011,7 @@
"key": "饺子影视",
"name": "饺子|影视",
"type": 3,
"api": "csp_Jiaozi",
"playerType": 2
"api": "csp_Jiaozi"
},
{
"key": "鸭梨影视",

6256
xiaosa/js/cloud.min.js vendored

File diff suppressed because it is too large Load Diff

1
xiaosa/js/drpy-core-lite.min.js vendored Normal file

File diff suppressed because one or more lines are too long

145
xiaosa/js/drpy2.min.js vendored
View File

@@ -1,26 +1,42 @@
import cheerio from "assets://js/lib/cheerio.min.js";
import "assets://js/lib/crypto-js.js";
import "./jsencrypt.js";
import "./node-rsa.js";
import "./pako.min.js";
import 模板 from "./模板.js";
import {
gbkTool
} from "./gbk.js";
import "./json5.js";
import "./jinja.js";
const _jinja2 = cheerio.jinja2;
cheerio.jinja2 = function(template, obj) {
try {
return jinja.render(template, obj)
} catch (e) {
console.log("新的jinja2库渲染失败,换回原始cheerio:" + e.message);
return _jinja2(template, obj)
}
};
cheerio,
模板
} from "./drpy-core-lite.min.js";
let vercode = typeof pdfl === "function" ? "drpy2.1" : "drpy2";
const VERSION = vercode + " 3.9.51beta6 20241126";
const VERSION = vercode + " 3.9.52beta3 20250801";
const UpdateInfo = [{
date: "20250801",
title: "drpy依赖更新使用drpy-core-lite.min.js",
version: "3.9.52beta3 20250801",
msg: `
drpy-core.min.js 更换为更小的drpy-core-lite.min.js
`
}, {
date: "20250729",
title: "drpy更新所有依赖打包成一个js文件",
version: "3.9.52beta2 20250729",
msg: `
1. wasm支持
2. 引入 TextEncoder、TextDecoder对象
3. 引入 WXXH 加解密库
4. 所有依赖打包成一个js
5. 增加 buildQueryString
`
}, {
date: "20250728",
title: "drpy更新增加tab_order线路模糊排序优化解密算法支持文件头",
version: "3.9.52beta1 20250728",
msg: `
1. 增加tab_order线路模糊排序
2. 优化解密算法支持文件头
3. wasm支持
4. 增加 removeHeader 函数可用于清除js/py文件的头信息及所有头注释
5. 引入 TextEncoder、TextDecoder对象
6. 引入 WXXH 加解密库
`
}, {
date: "20241126",
title: "drpy更新优化去广告算法",
version: "3.9.51beta6 20241126",
@@ -708,8 +724,7 @@ function ungzip(b64Data) {
function encodeStr(input, encoding) {
encoding = encoding || "gbk";
if (encoding.startsWith("gb")) {
const strTool = gbkTool();
input = strTool.encode(input)
input = gbkTool.encode(input)
}
return input
}
@@ -717,8 +732,7 @@ function encodeStr(input, encoding) {
function decodeStr(input, encoding) {
encoding = encoding || "gbk";
if (encoding.startsWith("gb")) {
const strTool = gbkTool();
input = strTool.decode(input)
input = gbkTool.decode(input)
}
return input
}
@@ -1351,6 +1365,24 @@ function keysToLowerCase(obj) {
}, {})
}
/**
 * Serialize a flat params object into a URL query string.
 *
 * Each own enumerable key/value pair becomes `encodeURIComponent(key)=encodeURIComponent(value)`,
 * joined by "&". `null` and `undefined` values serialize as an empty string
 * (the key is still emitted, e.g. `{a: null}` -> "a=").
 *
 * @param {Object} params - flat map of query parameters
 * @returns {string} the query string (no leading "?"); "" for an empty object
 */
function buildQueryString(params) {
    const parts = [];
    // Object.entries only yields own enumerable string keys, which matches
    // the original for...in + hasOwnProperty filtering.
    for (const [key, rawValue] of Object.entries(params)) {
        // Treat null/undefined as empty; everything else is stringified.
        const value = rawValue === undefined || rawValue === null ? "" : String(rawValue);
        parts.push(`${encodeURIComponent(key)}=${encodeURIComponent(value)}`);
    }
    return parts.join("&");
}
function parseQueryString(query) {
const params = {};
query.split("&").forEach(function(part) {
@@ -2442,7 +2474,17 @@ function vodDeal(vod) {
if (rule.tab_order && rule.tab_order.length > 0) {
let tab_order = rule.tab_order;
tab_ordered_list = tab_removed_list.sort((a, b) => {
return (tab_order.indexOf(a) === -1 ? 9999 : tab_order.indexOf(a)) - (tab_order.indexOf(b) === -1 ? 9999 : tab_order.indexOf(b))
const getOrderIndex = (tabName, orderRules) => {
for (let i = 0; i < orderRules.length; i++) {
if (tabName.includes(orderRules[i])) {
return i
}
}
return 9999
};
const indexA = getOrderIndex(a, tab_order);
const indexB = getOrderIndex(b, tab_order);
return indexA - indexB
});
tab_list = tab_ordered_list
}
@@ -2574,11 +2616,62 @@ function isVideoParse(isVideoObj) {
}
}
/**
 * Strip header metadata / leading comments from a .js or .py source string.
 *
 * Modes (options.mode):
 *  - "top-comments": remove the entire run of leading comments
 *    (// and block comments for .js; #, ''' and """ blocks for .py).
 *  - "header-only" (default): if the file starts with a block comment that
 *    contains an `@header(...)` directive, remove just that directive; the
 *    rest of the comment is kept (or the whole comment is dropped when the
 *    directive was its only content).
 *
 * @param {string} content - full source text
 * @param {{mode?: string, fileType: string}} options - fileType is required,
 *        e.g. "js", ".js", "py", ".py"
 * @returns {string} trimmed content with the header/comments removed
 * @throws {Error} when fileType is missing or unsupported
 */
function removeHeader(content, options = {}) {
    const {
        mode = "header-only", fileType
    } = options;
    // Per-extension comment delimiters and matching regexes.
    const COMMENT_CONFIG = {
        ".js": {
            start: "/*",
            end: "*/",
            regex: /^\s*\/\*([\s\S]*?)\*\/\s*/,
            headerRegex: /@header\(([\s\S]*?)\)/,
            topCommentsRegex: /^(\s*(\/\/[^\n]*\n|\/\*[\s\S]*?\*\/)\s*)+/
        },
        ".py": {
            start: '"""',
            end: '"""',
            regex: /^\s*"""([\s\S]*?)"""\s*/,
            headerRegex: /@header\(([\s\S]*?)\)/,
            topCommentsRegex: /^(\s*(#[^\n]*\n|'''[\s\S]*?'''|"""[\s\S]*?""")\s*)+/
        }
    };
    if (!fileType) throw new Error("fileType option is required");
    // Accept both "js" and ".js".
    const ext = fileType.startsWith(".") ? fileType : `.${fileType}`;
    const config = COMMENT_CONFIG[ext];
    if (!config) throw new Error(`Unsupported file type: ${ext}`);
    if (mode === "top-comments") {
        const match = content.match(config.topCommentsRegex);
        if (match) {
            return content.substring(match[0].length).trim();
        }
        return content.trim();
    }
    // header-only mode: only look at a block comment at the very start.
    const match = content.match(config.regex);
    if (!match) return content.trim();
    let [fullComment, innerContent] = match;
    if (config.headerRegex.test(innerContent)) {
        innerContent = innerContent.replace(config.headerRegex, "");
        // Drop lines left blank after removing the directive.
        const cleanedInner = innerContent.split("\n").filter(line => line.trim().length > 0).join("\n");
        if (!cleanedInner.trim()) {
            // The comment held only the directive: remove it entirely.
            return content.replace(fullComment, "").trim();
        } else {
            const newComment = `${config.start}${cleanedInner}${config.end}`;
            // BUGFIX: use a replacer function so "$" sequences ($&, $', $$, ...)
            // inside the surviving comment text are inserted literally instead
            // of being expanded as String.prototype.replace patterns.
            return content.replace(fullComment, () => newComment).trim();
        }
    }
    return content.trim();
}
function getOriginalJs(js_code) {
let current_match = /var rule|[\u4E00-\u9FA5]+|function|let |var |const |\(|\)|"|'/;
let current_match = /var rule|function|let |var |const|class Rule|async|this\./;
if (current_match.test(js_code)) {
return js_code
}
js_code = removeHeader(js_code, {
mode: "top-comments",
fileType: ".js"
});
let rsa_private_key = "MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCqin/jUpqM6+fgYP/oMqj9zcdHMM0mEZXLeTyixIJWP53lzJV2N2E3OP6BBpUmq2O1a9aLnTIbADBaTulTNiOnVGoNG58umBnupnbmmF8iARbDp2mTzdMMeEgLdrfXS6Y3VvazKYALP8EhEQykQVarexR78vRq7ltY3quXx7cgI0ROfZz5Sw3UOLQJ+VoWmwIxu9AMEZLVzFDQN93hzuzs3tNyHK6xspBGB7zGbwCg+TKi0JeqPDrXxYUpAz1cQ/MO+Da0WgvkXnvrry8NQROHejdLVOAslgr6vYthH9bKbsGyNY3H+P12kcxo9RAcVveONnZbcMyxjtF5dWblaernAgMBAAECggEAGdEHlSEPFmAr5PKqKrtoi6tYDHXdyHKHC5tZy4YV+Pp+a6gxxAiUJejx1hRqBcWSPYeKne35BM9dgn5JofgjI5SKzVsuGL6bxl3ayAOu+xXRHWM9f0t8NHoM5fdd0zC3g88dX3fb01geY2QSVtcxSJpEOpNH3twgZe6naT2pgiq1S4okpkpldJPo5GYWGKMCHSLnKGyhwS76gF8bTPLoay9Jxk70uv6BDUMlA4ICENjmsYtd3oirWwLwYMEJbSFMlyJvB7hjOjR/4RpT4FPnlSsIpuRtkCYXD4jdhxGlvpXREw97UF2wwnEUnfgiZJ2FT/MWmvGGoaV/CfboLsLZuQKBgQDTNZdJrs8dbijynHZuuRwvXvwC03GDpEJO6c1tbZ1s9wjRyOZjBbQFRjDgFeWs9/T1aNBLUrgsQL9c9nzgUziXjr1Nmu52I0Mwxi13Km/q3mT+aQfdgNdu6ojsI5apQQHnN/9yMhF6sNHg63YOpH+b+1bGRCtr1XubuLlumKKscwKBgQDOtQ2lQjMtwsqJmyiyRLiUOChtvQ5XI7B2mhKCGi8kZ+WEAbNQcmThPesVzW+puER6D4Ar4hgsh9gCeuTaOzbRfZ+RLn3Aksu2WJEzfs6UrGvm6DU1INn0z/tPYRAwPX7sxoZZGxqML/z+/yQdf2DREoPdClcDa2Lmf1KpHdB+vQKBgBXFCVHz7a8n4pqXG/HvrIMJdEpKRwH9lUQS/zSPPtGzaLpOzchZFyQQBwuh1imM6Te+VPHeldMh3VeUpGxux39/m+160adlnRBS7O7CdgSsZZZ/dusS06HAFNraFDZf1/VgJTk9BeYygX+AZYu+0tReBKSs9BjKSVJUqPBIVUQXAoGBAJcZ7J6oVMcXxHxwqoAeEhtvLcaCU9BJK36XQ/5M67ceJ72mjJC6/plUbNukMAMNyyi62gO6I9exearecRpB/OGIhjNXm99Ar59dAM9228X8gGfryLFMkWcO/fNZzb6lxXmJ6b2LPY3KqpMwqRLTAU/zy+ax30eFoWdDHYa4X6e1AoGAfa8asVGOJ8GL9dlWufEeFkDEDKO9ww5GdnpN+wqLwePWqeJhWCHad7bge6SnlylJp5aZXl1+YaBTtOskC4Whq9TP2J+dNIgxsaF5EFZQJr8Xv+lY9lu0CruYOh9nTNF9x3nubxJgaSid/7yRPfAGnsJRiknB5bsrCvgsFQFjJVs=";
let decode_content = "";

View File

@@ -31,7 +31,7 @@ var rule = {
img: 'img&&src',
desc: '.card-text:eq(2)&&Text;;;;',
content: 'body&&.semi-space-loose-vertical&&a&&href',
tabs: "js:TABS = ['懒盘']",
tabs: "js:TABS = ['爱搜']",
lists: "js:\n LISTS = [];\n let lists1 = pdfa(html, 'body&&.semi-space-loose-vertical').map(it => {\n let _tt = pdfh(it, 'span&&title');\n let _uu = pdfh(it, 'a&&href');\n return _tt + '$' + _uu;\n });\n LISTS.push(lists1);\n ",
},
搜索: 'js:\n let html = fetch(input);\n let list = pdfa(html, "body&&.semi-space-medium-vertical");\n VODS = list.map(x => {\n let remarks = pdfh(x, "div&&img&&alt");\n // 过滤掉包含"迅雷云盘"的内容\n if(remarks.includes("迅雷云盘") || remarks.includes("115") || remarks.includes("阿里")) return null;let vodName = pdfh(x, "div&&a&&title");\n // 过滤条件:迅雷云盘、.txt后缀、空名称\n if(vodName.endsWith(".zip") || vodName.endsWith(".txt") || !vodName.trim()) return null;\n return {\n vod_name: pdfh(x, "div&&a&&title"),\n vod_pic: \'\',\n vod_remarks: remarks,\n vod_content: remarks,\n vod_id: pdfh(x, "div&&a&&href")\n }\n }).filter(x => x !== null);\n ',

329
xiaosa/py/剧王短剧.py Normal file
View File

@@ -0,0 +1,329 @@
# coding=utf-8
# !/usr/bin/python
"""
作者 丢丢喵推荐 🚓 内容均从互联网收集而来 仅供交流学习使用 版权归原创者所有 如侵犯了您的权益 请通知作者 将及时删除侵权内容
====================Diudiumiao====================
"""
from Crypto.Util.Padding import unpad
from Crypto.Util.Padding import pad
from urllib.parse import unquote
from Crypto.Cipher import ARC4
from urllib.parse import quote
from base.spider import Spider
from Crypto.Cipher import AES
from datetime import datetime
from bs4 import BeautifulSoup
from base64 import b64decode
import urllib.request
import urllib.parse
import datetime
import binascii
import requests
import base64
import json
import time
import sys
import re
import os
sys.path.append('..')  # make the host app's packages (e.g. base.spider) importable
# Base URL of the 剧王短剧 short-drama site scraped by this spider.
xurl = "https://djw1.com"
# Headers sent with every outbound request (desktop Chrome User-Agent).
headerx = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'
}
class Spider(Spider):
    """TVBox-style spider for the 剧王短剧 short-drama site (djw1.com).

    Scrapes category listings, detail pages, play URLs and search results
    using requests + BeautifulSoup.

    NOTE(review): this subclass shadows the imported base class name
    ``Spider`` (from base.spider) — legal but confusing.
    """
    # NOTE(review): these globals are redundant at class scope (module names
    # are readable without a global declaration); kept as-is.
    global xurl
    global headerx

    def getName(self):
        # Display name reported to the host app.
        return "首页"

    def init(self, extend):
        # No initialization needed; extend config is ignored.
        pass

    def isVideoFormat(self, url):
        # Not implemented; host app falls back to its own detection.
        pass

    def manualVideoCheck(self):
        # Not implemented.
        pass

    def extract_middle_text(self, text, start_str, end_str, pl, start_index1: str = '', end_index2: str = ''):
        """Extract text between start_str and end_str, with mode selector pl.

        pl == 3: repeatedly extract every span between the markers, run the
            regex ``start_index1`` (expected to yield (url, name) tuples) on
            each span, and build a play-list string of the form
            ``name$episode_url#...`` with spans joined by "$$$".
        pl == 0: return the first span with backslashes removed.
        pl == 1: return regex matches from the first span joined by spaces.
        pl == 2: return regex matches from the first span joined by "$$$".

        NOTE(review): ``end_index2`` is never used; pl==1/pl==2 implicitly
        return None when the regex finds nothing — callers appear to rely on
        truthy results only. TODO confirm.
        """
        if pl == 3:
            plx = []
            # Collect every marker-delimited span, removing each from the
            # text so .find() advances past it.
            while True:
                start_index = text.find(start_str)
                if start_index == -1:
                    break
                end_index = text.find(end_str, start_index + len(start_str))
                if end_index == -1:
                    break
                middle_text = text[start_index + len(start_str):end_index]
                plx.append(middle_text)
                text = text.replace(start_str + middle_text + end_str, '')
            if len(plx) > 0:
                purl = ''
                for i in range(len(plx)):
                    matches = re.findall(start_index1, plx[i])
                    output = ""
                    for match in matches:
                        # Pull the first standalone digit run out of the
                        # episode title to use as an ordering number;
                        # default to 0 when the title has no number.
                        match3 = re.search(r'(?:^|[^0-9])(\d+)(?:[^0-9]|$)', match[1])
                        if match3:
                            number = match3.group(1)
                        else:
                            number = 0
                        # Relative links get the site base URL prepended.
                        if 'http' not in match[0]:
                            output += f"#{match[1]}${number}{xurl}{match[0]}"
                        else:
                            output += f"#{match[1]}${number}{match[0]}"
                    output = output[1:]  # drop the leading '#'
                    purl = purl + output + "$$$"
                purl = purl[:-3]  # drop the trailing '$$$'
                return purl
            else:
                return ""
        else:
            start_index = text.find(start_str)
            if start_index == -1:
                return ""
            end_index = text.find(end_str, start_index + len(start_str))
            if end_index == -1:
                return ""
            if pl == 0:
                middle_text = text[start_index + len(start_str):end_index]
                return middle_text.replace("\\", "")
            if pl == 1:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    jg = ' '.join(matches)
                    return jg
            if pl == 2:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    new_list = [f'{item}' for item in matches]
                    jg = '$$$'.join(new_list)
                    return jg

    def homeContent(self, filter):
        """Build the home category list from the /all/ page."""
        result = {"class": []}
        detail = requests.get(url=xurl + "/all/", headers=headerx)
        detail.encoding = "utf-8"
        res = detail.text
        doc = BeautifulSoup(res, "lxml")
        soups = doc.find_all('section', class_="container items")
        for soup in soups:
            vods = soup.find_all('li')
            for vod in vods:
                # Each <li> holds a category link; its href is used
                # directly as the category id.
                id = vod.find('a')['href']
                name = vod.text.strip()
                result["class"].append({"type_id": id, "type_name": "" + name})
        return result

    def homeVideoContent(self):
        # No recommended-videos section on the home page.
        pass

    def categoryContent(self, cid, pg, filter, ext):
        """List videos for category ``cid`` at page ``pg``.

        NOTE(review): pagecount/total are hard-coded sentinels so the app
        keeps paging until an empty list comes back.
        """
        result = {}
        videos = []
        if pg:
            page = int(pg)
        else:
            page = 1
        # cid is a full category URL (ends with '/'), so paging appends
        # 'page/<n>/' to it.
        url = f'{cid}page/{str(page)}/'
        detail = requests.get(url=url, headers=headerx)
        detail.encoding = "utf-8"
        res = detail.text
        doc = BeautifulSoup(res, "lxml")
        soups = doc.find_all('section', class_="container items")
        for soup in soups:
            vods = soup.find_all('li')
            for vod in vods:
                name = vod.find('img')['alt']
                ids = vod.find('a', class_="image-line")
                id = ids['href']
                pic = vod.find('img')['src']
                remark = self.extract_middle_text(str(vod), 'class="remarks light">', '<', 0)
                video = {
                    "vod_id": id,
                    "vod_name": name,
                    "vod_pic": pic,
                    "vod_remarks": '▶️' + remark
                }
                videos.append(video)
        result = {'list': videos}
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Build the detail/play info for one video.

        Also fetches a remote text file (jiduo.txt) holding two markers:
        s1 (a string expected inside the page's description) and s2 (a
        fallback/jump play URL). When s1 is absent from the description the
        episode list is replaced by the s2 URL — presumably a remote
        kill-switch / announcement gate; TODO confirm intent.
        """
        did = ids[0]
        result = {}
        videos = []
        xianlu = ''
        bofang = ''
        # Relative detail ids get the site base URL prepended.
        if 'http' not in did:
            did = xurl + did
        res = requests.get(url=did, headers=headerx)
        res.encoding = "utf-8"
        res = res.text
        doc = BeautifulSoup(res, "lxml")
        url = 'https://fs-im-kefu.7moor-fs1.com/ly/4d2c3f00-7d4c-11e5-af15-41bf63ae4ea0/1732707176882/jiduo.txt'
        response = requests.get(url)
        response.encoding = 'utf-8'
        code = response.text
        name = self.extract_middle_text(code, "s1='", "'", 0)
        Jumps = self.extract_middle_text(code, "s2='", "'", 0)
        content = '集多为您介绍剧情📢' + self.extract_middle_text(res,'class="info-detail">','<', 0)
        remarks = self.extract_middle_text(res, 'class="info-mark">', '<', 0)
        year = self.extract_middle_text(res, 'class="info-addtime">', '<', 0)
        if name not in content:
            # Gate marker missing: play the remote jump URL instead.
            bofang = Jumps
            xianlu = '1'
        else:
            # Normal path: build 'episode$href#...' from the episode list.
            soups = doc.find('div', class_="ep-list-items")
            soup = soups.find_all('a')
            for sou in soup:
                id = sou['href']
                name = sou.text.strip()
                bofang = bofang + name + '$' + id + '#'
            bofang = bofang[:-1]  # drop trailing '#'
            xianlu = '专线'
        videos.append({
            "vod_id": did,
            "vod_remarks": remarks,
            "vod_year": year,
            "vod_content": content,
            "vod_play_from": xianlu,
            "vod_play_url": bofang
        })
        result['list'] = videos
        return result

    def playerContent(self, flag, id, vipFlags):
        """Resolve an episode page to its direct m3u8 URL (no parser)."""
        res = requests.get(url=id, headers=headerx)
        res.encoding = "utf-8"
        res = res.text
        # The page embeds the stream as "wwm3u8":"<escaped url>".
        url = self.extract_middle_text(res, '"wwm3u8":"', '"', 0).replace('\\', '')
        result = {}
        result["parse"] = 0  # 0 = direct URL, no webview sniffing needed
        result["playUrl"] = ''
        result["url"] = url
        result["header"] = headerx
        return result

    def searchContentPage(self, key, quick, pg):
        """Search ``key`` at page ``pg``; same card parsing as categoryContent."""
        result = {}
        videos = []
        if pg:
            page = int(pg)
        else:
            page = 1
        url = f'{xurl}/search/{key}/page/{str(page)}/'
        detail = requests.get(url=url, headers=headerx)
        detail.encoding = "utf-8"
        res = detail.text
        doc = BeautifulSoup(res, "lxml")
        soups = doc.find_all('section', class_="container items")
        for soup in soups:
            vods = soup.find_all('li')
            for vod in vods:
                name = vod.find('img')['alt']
                ids = vod.find('a', class_="image-line")
                id = ids['href']
                pic = vod.find('img')['src']
                remark = self.extract_middle_text(str(vod), 'class="remarks light">', '<', 0)
                video = {
                    "vod_id": id,
                    "vod_name": name,
                    "vod_pic": pic,
                    "vod_remarks": '▶️' + remark
                }
                videos.append(video)
        result['list'] = videos
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def searchContent(self, key, quick, pg="1"):
        # NOTE(review): always searches page '1', ignoring the pg argument.
        return self.searchContentPage(key, quick, '1')

    def localProxy(self, params):
        # Dispatch local-proxy requests by type.
        # NOTE(review): proxyM3u8/proxyMedia/proxyTs are not defined in this
        # file — presumably inherited from the base Spider; calling them
        # otherwise raises AttributeError. TODO confirm.
        if params['type'] == "m3u8":
            return self.proxyM3u8(params)
        elif params['type'] == "media":
            return self.proxyMedia(params)
        elif params['type'] == "ts":
            return self.proxyTs(params)
        return None

Binary file not shown.