Update sources

潇洒 source: v08.06(22)
PG source: 20250806-1903
Liu
2025-08-07 01:14:16 +08:00
parent 107f190284
commit b59f6ac02c
10 changed files with 467 additions and 195 deletions

View File

@@ -560,6 +560,18 @@
"source": "?livemode=4&starttime=${(b)yyyyMMdd'T'HHmm}00.00Z&endtime=${(e)yyyyMMdd'T'HHmm}00.00Z"
}
},
{
"name": "FeiYang牧场",
"type": 0,
"url": "http://127.0.0.1:10079/c/3600/proxy/https://raw.githubusercontent.com/Mursor/LIVE/refs/heads/main/iptv.m3u",
"ua": "okhttp/3.15",
"epg": "http://127.0.0.1:10079/p/0/proxy/http://epg.112114.xyz/?ch={name}&date=DATE1SUB",
"logo": "http://127.0.0.1:10079/p/0/proxy/https://epg.112114.xyz/logo/{name}.png",
"catchup": {
"type": "append",
"source": "?livemode=4&starttime=${(b)yyyyMMdd'T'HHmm}00.00Z&endtime=${(e)yyyyMMdd'T'HHmm}00.00Z"
}
},
{
"name": "BJYD",
"type": 0,

View File

@@ -560,6 +560,18 @@
"source": "?livemode=4&starttime=${(b)yyyyMMdd'T'HHmm}00.00Z&endtime=${(e)yyyyMMdd'T'HHmm}00.00Z"
}
},
{
"name": "FeiYang牧场",
"type": 0,
"url": "http://127.0.0.1:10079/c/3600/proxy/https://raw.githubusercontent.com/Mursor/LIVE/refs/heads/main/iptv.m3u",
"ua": "okhttp/3.15",
"epg": "http://127.0.0.1:10079/p/0/proxy/http://epg.112114.xyz/?ch={name}&date=DATE1SUB",
"logo": "http://127.0.0.1:10079/p/0/proxy/https://epg.112114.xyz/logo/{name}.png",
"catchup": {
"type": "append",
"source": "?livemode=4&starttime=${(b)yyyyMMdd'T'HHmm}00.00Z&endtime=${(e)yyyyMMdd'T'HHmm}00.00Z"
}
},
{
"name": "BJYD",
"type": 0,

BIN
PG/pg.jar

Binary file not shown.

View File

@@ -1 +1 @@
8187b98e62e349ca4fa1a511850bfa80
87cd5a6765318c10028c65cc456f10f2

View File

@@ -2253,7 +2253,7 @@ function detailParse(detailObj) {
}
}
if (p === "*") {
vod.vod_play_from = "兵哥视界";
vod.vod_play_from = "在线播放";
vod.vod_remarks = detailUrl;
vod.vod_actor = "没有二级,只有一级链接直接嗅探播放";
vod.vod_content = MY_URL;
@@ -2359,7 +2359,7 @@ function detailParse(detailObj) {
}
console.log(JSON.stringify(playFrom))
} else {
playFrom = ["兵哥视界"]
playFrom = ["在线播放"]
}
vod.vod_play_from = playFrom.join(vod_play_from);
let vod_play_url = "$$$";

View File

@@ -85,6 +85,51 @@
"key3": "aassddwwxxllsx1x"
}
},
{
"key": "乐达",
"name": "乐达APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "https://ledayy.com",
"site": "",
"dataKey": "hjjp68c2okw12345",
"dataIv": "hjjp68c2okw12345",
"deviceId": "",
"version": ""
}
},
{
"key": "灵虎",
"name": "灵虎APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "",
"site": "https://bind.315999.xyz/89.txt",
"dataKey": "#getapp@TMD@2025",
"dataIv": "#getapp@TMD@2025",
"deviceId": "",
"version": "120"
}
},
{
"key": "旗星",
"name": "旗星APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "http://ys.qist.top",
"site": "",
"dataKey": "2SWSPFxugBLPPOKo",
"dataIv": "2SWSPFxugBLPPOKo",
"deviceId": "",
"version": "120"
}
},
{
"key": "云云",
"name": "云云APP",
@@ -279,6 +324,7 @@
"key": "米诺",
"name": "米诺APP",
"type": 3,
"quickSearch": 1,
"api": "csp_AppGet",
"ext": {
"url": "http://www.milkidc.cn",
@@ -764,23 +810,59 @@
"type": "list"
}
},
{
"key": "爱搜",
"name": "爱搜4K弹幕",
"type": 3,
"api": "./js/cloud.min.js",
"ext": "./js/爱搜.js",
"style": {
"type": "list"
}
},
{
"key": "糖果",
"name": "糖果|搜索",
"type": 3,
"api": "csp_TGSou",
"searchable": 1,
"changeable": 0
"quickSearch": 1,
"filterable": 1
},
{
"key": "音海夸克",
"name": "音海|夸克",
"type": 3,
"api": "csp_YinHaiQuark",
"searchable": 1,
"quickSearch": 1,
"filterable": 1
},
{
"key": "音海UC",
"name": "音海UC",
"type": 3,
"api": "csp_YinHaiUC",
"searchable": 1,
"quickSearch": 1,
"filterable": 1
},
{
"key": "音海天翼",
"name": "音海|天翼",
"type": 3,
"api": "csp_YinHaiTianyi",
"searchable": 1,
"quickSearch": 1,
"filterable": 1
},
{
"key": "音海123",
"name": "音海123",
"type": 3,
"api": "csp_YinHai123",
"searchable": 1,
"quickSearch": 1,
"filterable": 1
},
{
"key": "音海百度",
"name": "音海|百度",
"type": 3,
"api": "csp_YinHaiBaidu",
"searchable": 1,
"quickSearch": 1,
"filterable": 1
},
{
"key": "米搜",
@@ -788,16 +870,6 @@
"type": 3,
"api": "csp_MiSou"
},
{
"key": "人人分享站",
"name": "人人|搜索",
"type": 3,
"api": "./js/cloud.min.js",
"ext": "./js/人人分享站.js",
"style": {
"type": "list"
}
},
{
"key": "全盘",
"name": "全盘|搜索",
@@ -825,8 +897,7 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "偷乐短剧",
@@ -836,8 +907,7 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "爱看短剧",
@@ -847,8 +917,7 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "锦鲤短剧",
@@ -858,8 +927,17 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "剧王短剧",
"name": "剧王|短剧",
"type": 3,
"api": "./py/剧王短剧.py",
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1
},
{
"key": "短剧屋",
@@ -895,7 +973,6 @@
"searchable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2,
"ext": {
"site": "https://m.hkybqufgh.com,https://m.sizhengxt.com,https://m.9zhoukj.com,https://m.sizhengxt.com,https://m.jiabaide.cn"
}
@@ -908,8 +985,7 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "零度影视",
@@ -919,8 +995,7 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "追星影视",
@@ -930,8 +1005,7 @@
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
"filterable": 1,
"playerType": 2
"filterable": 1
},
{
"key": "厂长影视",
@@ -984,8 +1058,7 @@
"key": "饺子影视",
"name": "饺子|影视",
"type": 3,
"api": "csp_Jiaozi",
"playerType": 2
"api": "csp_Jiaozi"
},
{
"key": "鸭梨影视",

View File

@@ -1,102 +0,0 @@
var rule = {
title:'人人影视[搜]',
host:'https://yyets.click',
homeUrl:'/',
url:'*',
filter_url:'{{fl.class}}',
filter:{
},
searchUrl: '*',
searchable:2,
quickSearch:0,
filterable:0,
headers:{
'User-Agent': PC_UA,
'Accept': '*/*',
'Referer': 'https://yyets.click/',
'Cookie':'http://127.0.0.1:9978/file:///tvbox/JS/lib/yyets.txt',
},
timeout:5000,
class_name:'',
class_url:'',
play_parse:true,
play_json:[{
re:'*',
json:{
parse:0,
jx:0
}
}],
lazy:'',
limit:6,
推荐:'',
一级:'',
二级:`js:
VOD.vod_play_from = "人人分享站";
VOD.vod_remarks = detailUrl;
VOD.vod_actor = "沒有二级,只有一级链接直接推送播放";
VOD.vod_content = MY_URL;
VOD.vod_play_url = "人人分享站$" + detailUrl;
`,
搜索:`js:
pdfh=jsp.pdfh;pdfa=jsp.pdfa;pd=jsp.pd;
if (rule_fetch_params.headers.Cookie.startsWith("http")){
rule_fetch_params.headers.Cookie=fetch(rule_fetch_params.headers.Cookie);
let cookie = rule_fetch_params.headers.Cookie;
setItem(RULE_CK, cookie);
};
log('yyets search cookie>>>>>>>>>>>>>>>' + rule_fetch_params.headers.Cookie);
let _fetch_params = JSON.parse(JSON.stringify(rule_fetch_params));
_fetch_params.headers.Referer = 'http://yyets.click/search?keyword=' + encodeURIComponent(KEY) + '&type=default';
log('yyets search params>>>>>>>>>>>>>>>' + JSON.stringify(_fetch_params));
let new_html=request(rule.homeUrl + 'api/resource?keyword=' + encodeURIComponent(KEY) + '&type=default', _fetch_params);
//log("yyets search result>>>>>>>>>>>>>>>" + new_html);
let json=JSON.parse(new_html);
let d=[];
for(const it in json.comment){
if (json.comment.hasOwnProperty(it)){
log("yyets search it>>>>>>>>>>>>>>>" + JSON.stringify(json.comment[it]));
if (/(www.aliyundrive.com|pan.quark.cn|www.alipan.com)/.test(json.comment[it].comment)){
let its = json.comment[it].comment.split("\\n");
let i=0;
while(i<its.length){
let title=its[i].trim().replaceAll(/\\s+/g," ");
if (title.length==0){
i++;
continue;
}
let urls=[];
log("yyets search title>>>>>>>>>>>>>>>" + title);
while(++i<its.length){
log("yyets search url>>>>>>>>>>>>>>>" + its[i]);
let burl = its[i].trim().split(" ")[0];
if (burl.length==0){
continue;
}
if (burl.includes("https://")){
urls.push("https:"+burl.split("https:")[1]);
}else{
break;
}
}
if (urls.length>0){
log("yyets search title,urls>>>>>>>>>>>>>>>" + title + ",[" + JSON.stringify(urls) + "]");
if (title.includes(KEY)){
urls.forEach(function (url) {
d.push({
title:title,
img:'',
content:json.comment[it].comment,
desc:json.comment[it].date,
url:'push://'+url
});
});
}
}
}
}
}
}
setResult(d);
`,
}

View File

@@ -1,52 +0,0 @@
var rule = {
title: '爱搜',
host: 'https://www.esoua.com/',
hostJs: '',
headers: {
'User-Agent': 'Mozilla/5.0 (Linux; Android 11; Pixel 5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.91 Mobile Safari/537.36',
},
编码: 'utf-8',
timeout: 5000,
url: 'https://www.esoua.com/search?q=fyclass&format=video&exact=true&page=fypage',
filter_url: '',
detailUrl: '',
searchUrl: 'https://www.esoua.com/search?q=**&format=video&exact=true&page=fypage',
searchable: 1,
quickSearch: 1,
filterable: 1,
class_name: '剧集&电影&短剧&动漫&综艺',
class_url: '剧集&电影&短剧&动漫&综艺',
proxy_rule: '',
sniffer: false,
isVideo: '',
play_parse: true,
parse_url: '',
lazy: "js:\n input = 'push://' + input;\n ",
limit: 9,
double: false,
// 推荐: '*',
一级: 'js:\n let html = fetch(input);\n let list = pdfa(html, "body&&.semi-space-medium-vertical");\n VODS = list.map(x => {\n let remarks = pdfh(x, "div&&img&&alt");\n // 过滤掉包含"迅雷云盘"的内容\n if(remarks.includes("迅雷云盘") || remarks.includes("115") || remarks.includes("阿里")) return null;\n return {\n vod_name: pdfh(x, "div&&a&&title"),\n vod_pic: \'\',\n vod_remarks: remarks,\n vod_content: remarks,\n vod_id: pdfh(x, "div&&a&&href")\n }\n }).filter(x => x !== null);\n ',
二级: {
title: 'h1&&Text',
img: 'img&&src',
desc: '.card-text:eq(2)&&Text;;;;',
content: 'body&&.semi-space-loose-vertical&&a&&href',
tabs: "js:TABS = ['爱搜']",
lists: "js:\n LISTS = [];\n let lists1 = pdfa(html, 'body&&.semi-space-loose-vertical').map(it => {\n let _tt = pdfh(it, 'span&&title');\n let _uu = pdfh(it, 'a&&href');\n return _tt + '$' + _uu;\n });\n LISTS.push(lists1);\n ",
},
搜索: 'js:\n let html = fetch(input);\n let list = pdfa(html, "body&&.semi-space-medium-vertical");\n VODS = list.map(x => {\n let remarks = pdfh(x, "div&&img&&alt");\n // 过滤掉包含"迅雷云盘"的内容\n if(remarks.includes("迅雷云盘") || remarks.includes("115") || remarks.includes("阿里")) return null;let vodName = pdfh(x, "div&&a&&title");\n // 过滤条件:迅雷云盘、.txt后缀、空名称\n if(vodName.endsWith(".zip") || vodName.endsWith(".txt") || !vodName.trim()) return null;\n return {\n vod_name: pdfh(x, "div&&a&&title"),\n vod_pic: \'\',\n vod_remarks: remarks,\n vod_content: remarks,\n vod_id: pdfh(x, "div&&a&&href")\n }\n }).filter(x => x !== null);\n ',
cate_exclude: '首页|留言|APP|下载|资讯|新闻|动态',
tab_exclude: '猜你|喜欢|下载|剧情|榜|评论',
类型: '影视',
homeUrl: 'https://www.esoua.com/',
二级访问前: '',
encoding: 'utf-8',
search_encoding: '',
图片来源: '',
图片替换: '',
play_json: [],
pagecount: {},
tab_remove: [],
tab_order: [],
tab_rename: {},
}

潇洒/py/剧王短剧.py (new file, 329 lines added)
View File

@@ -0,0 +1,329 @@
# coding=utf-8
# !/usr/bin/python
"""
Author: Diudiumiao (丢丢喵) 🚓  All content is collected from the internet and is for learning and exchange only. Copyright belongs to the original creators; if your rights have been infringed, please notify the author and the infringing content will be removed promptly.
====================Diudiumiao====================
"""
from Crypto.Util.Padding import unpad
from Crypto.Util.Padding import pad
from urllib.parse import unquote
from Crypto.Cipher import ARC4
from urllib.parse import quote
from base.spider import Spider
from Crypto.Cipher import AES
from datetime import datetime
from bs4 import BeautifulSoup
from base64 import b64decode
import urllib.request
import urllib.parse
import datetime
import binascii
import requests
import base64
import json
import time
import sys
import re
import os
sys.path.append('..')
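# Target site root and the default desktop User-Agent sent with every request below.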
xurl = "https://djw1.com"
headerx = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'
}
class Spider(Spider):
global xurl
global headerx
def getName(self):
return "首页"
def init(self, extend):
pass
def isVideoFormat(self, url):
pass
def manualVideoCheck(self):
pass
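# Generic substring extractor shared by the scrapers below:
# pl=0 returns the first text between start_str and end_str (backslashes stripped),
# pl=1/2 run the regex in start_index1 over that text and join the matches with a space / "$$$",
# pl=3 walks every start/end block and builds "name$url" play entries joined by "#", blocks joined by "$$$".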
def extract_middle_text(self, text, start_str, end_str, pl, start_index1: str = '', end_index2: str = ''):
if pl == 3:
plx = []
while True:
start_index = text.find(start_str)
if start_index == -1:
break
end_index = text.find(end_str, start_index + len(start_str))
if end_index == -1:
break
middle_text = text[start_index + len(start_str):end_index]
plx.append(middle_text)
text = text.replace(start_str + middle_text + end_str, '')
if len(plx) > 0:
purl = ''
for i in range(len(plx)):
matches = re.findall(start_index1, plx[i])
output = ""
for match in matches:
match3 = re.search(r'(?:^|[^0-9])(\d+)(?:[^0-9]|$)', match[1])
if match3:
number = match3.group(1)
else:
number = 0
if 'http' not in match[0]:
output += f"#{match[1]}${number}{xurl}{match[0]}"
else:
output += f"#{match[1]}${number}{match[0]}"
output = output[1:]
purl = purl + output + "$$$"
purl = purl[:-3]
return purl
else:
return ""
else:
start_index = text.find(start_str)
if start_index == -1:
return ""
end_index = text.find(end_str, start_index + len(start_str))
if end_index == -1:
return ""
if pl == 0:
middle_text = text[start_index + len(start_str):end_index]
return middle_text.replace("\\", "")
if pl == 1:
middle_text = text[start_index + len(start_str):end_index]
matches = re.findall(start_index1, middle_text)
if matches:
jg = ' '.join(matches)
return jg
if pl == 2:
middle_text = text[start_index + len(start_str):end_index]
matches = re.findall(start_index1, middle_text)
if matches:
new_list = [f'{item}' for item in matches]
jg = '$$$'.join(new_list)
return jg
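# Home categories: scrape the /all/ index and map each <li> link to a type_id/type_name pair.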
def homeContent(self, filter):
result = {"class": []}
detail = requests.get(url=xurl + "/all/", headers=headerx)
detail.encoding = "utf-8"
res = detail.text
doc = BeautifulSoup(res, "lxml")
soups = doc.find_all('section', class_="container items")
for soup in soups:
vods = soup.find_all('li')
for vod in vods:
id = vod.find('a')['href']
name = vod.text.strip()
result["class"].append({"type_id": id, "type_name": "" + name})
return result
def homeVideoContent(self):
pass
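# Category listing: fetch "<type_id>page/<n>/" and collect title, link, poster and remark for every item.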
def categoryContent(self, cid, pg, filter, ext):
result = {}
videos = []
if pg:
page = int(pg)
else:
page = 1
url = f'{cid}page/{str(page)}/'
detail = requests.get(url=url, headers=headerx)
detail.encoding = "utf-8"
res = detail.text
doc = BeautifulSoup(res, "lxml")
soups = doc.find_all('section', class_="container items")
for soup in soups:
vods = soup.find_all('li')
for vod in vods:
name = vod.find('img')['alt']
ids = vod.find('a', class_="image-line")
id = ids['href']
pic = vod.find('img')['src']
remark = self.extract_middle_text(str(vod), 'class="remarks light">', '<', 0)
video = {
"vod_id": id,
"vod_name": name,
"vod_pic": pic,
"vod_remarks": '▶️' + remark
}
videos.append(video)
result = {'list': videos}
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['total'] = 999999
return result
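# Detail page: read the notice published in the remote jiduo.txt; if its keyword is absent from the
# intro text, play the announcement link directly, otherwise build the episode list from .ep-list-items.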
def detailContent(self, ids):
did = ids[0]
result = {}
videos = []
xianlu = ''
bofang = ''
if 'http' not in did:
did = xurl + did
res = requests.get(url=did, headers=headerx)
res.encoding = "utf-8"
res = res.text
doc = BeautifulSoup(res, "lxml")
url = 'https://fs-im-kefu.7moor-fs1.com/ly/4d2c3f00-7d4c-11e5-af15-41bf63ae4ea0/1732707176882/jiduo.txt'
response = requests.get(url)
response.encoding = 'utf-8'
code = response.text
name = self.extract_middle_text(code, "s1='", "'", 0)
Jumps = self.extract_middle_text(code, "s2='", "'", 0)
content = '集多为您介绍剧情📢' + self.extract_middle_text(res,'class="info-detail">','<', 0)
remarks = self.extract_middle_text(res, 'class="info-mark">', '<', 0)
year = self.extract_middle_text(res, 'class="info-addtime">', '<', 0)
if name not in content:
bofang = Jumps
xianlu = '1'
else:
soups = doc.find('div', class_="ep-list-items")
soup = soups.find_all('a')
for sou in soup:
id = sou['href']
name = sou.text.strip()
bofang = bofang + name + '$' + id + '#'
bofang = bofang[:-1]
xianlu = '专线'
videos.append({
"vod_id": did,
"vod_remarks": remarks,
"vod_year": year,
"vod_content": content,
"vod_play_from": xianlu,
"vod_play_url": bofang
})
result['list'] = videos
return result
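# Play: fetch the episode page and return the direct "wwm3u8" stream URL (parse=0, no sniffing).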
def playerContent(self, flag, id, vipFlags):
res = requests.get(url=id, headers=headerx)
res.encoding = "utf-8"
res = res.text
url = self.extract_middle_text(res, '"wwm3u8":"', '"', 0).replace('\\', '')
result = {}
result["parse"] = 0
result["playUrl"] = ''
result["url"] = url
result["header"] = headerx
return result
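# Search: same item scraping as categoryContent, but against /search/<keyword>/page/<n>/.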
def searchContentPage(self, key, quick, pg):
result = {}
videos = []
if pg:
page = int(pg)
else:
page = 1
url = f'{xurl}/search/{key}/page/{str(page)}/'
detail = requests.get(url=url, headers=headerx)
detail.encoding = "utf-8"
res = detail.text
doc = BeautifulSoup(res, "lxml")
soups = doc.find_all('section', class_="container items")
for soup in soups:
vods = soup.find_all('li')
for vod in vods:
name = vod.find('img')['alt']
ids = vod.find('a', class_="image-line")
id = ids['href']
pic = vod.find('img')['src']
remark = self.extract_middle_text(str(vod), 'class="remarks light">', '<', 0)
video = {
"vod_id": id,
"vod_name": name,
"vod_pic": pic,
"vod_remarks": '▶️' + remark
}
videos.append(video)
result['list'] = videos
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['total'] = 999999
return result
def searchContent(self, key, quick, pg="1"):
return self.searchContentPage(key, quick, '1')
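# Local proxy dispatch; proxyM3u8/proxyMedia/proxyTs are not defined in this file and are
# assumed to come from the base Spider or a fuller build of this source.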
def localProxy(self, params):
if params['type'] == "m3u8":
return self.proxyM3u8(params)
elif params['type'] == "media":
return self.proxyMedia(params)
elif params['type'] == "ts":
return self.proxyTs(params)
return None

Binary file not shown.