mirror of
https://github.com/ls125781003/tvboxtg.git
synced 2025-12-12 23:22:20 +00:00
更新线路
整体线路:v09.02 潇洒线路:v09.02 PG线路:20250903-1624
This commit is contained in:
496
摸鱼儿/api.json
496
摸鱼儿/api.json
@@ -56,15 +56,6 @@
|
||||
"danMu": "弹"
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "hlxb",
|
||||
"name": "♨️昨夜星辰┃4K纯净",
|
||||
"type": 4,
|
||||
"api": "http://我不是.摸鱼儿.com/api/hlxb.php",
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
"changeable": 0
|
||||
},
|
||||
{
|
||||
"key": "seed",
|
||||
"name": "♨️霜雪笼花┃三盘聚合",
|
||||
@@ -80,19 +71,179 @@
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "星河",
|
||||
"name": "🌀星河┃超清┃弹幕",
|
||||
"key": "玩偶哥哥",
|
||||
"name": "♨️玩偶备用┃4K弹幕",
|
||||
"type": 3,
|
||||
"quickSearch": 1,
|
||||
"api": "csp_AppSy",
|
||||
"api": "csp_PanWebShare",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1,
|
||||
"ext": "./json/wogg.json"
|
||||
},
|
||||
{
|
||||
"key": "MoggV2",
|
||||
"name": "🍁优汐木偶┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShare",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"quickSearch": 0,
|
||||
"filterable": 0,
|
||||
"ext": "./json/mogg.json"
|
||||
},
|
||||
{
|
||||
"key": "UcXmV2",
|
||||
"name": "🐂小二资源┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShare",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"filterable": 0,
|
||||
"changeable": 0,
|
||||
"timeout": 60,
|
||||
"ext": "./json/ex.json"
|
||||
},
|
||||
{
|
||||
"key": "QuarkLaBiV2",
|
||||
"name": "🖍︎蜡笔资源┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShare",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"filterable": 0,
|
||||
"changeable": 0,
|
||||
"timeout": 60,
|
||||
"ext": "./json/lb.json"
|
||||
},
|
||||
{
|
||||
"key": "QuarkzzV2",
|
||||
"name": "🏆️夸克至臻┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShare",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"filterable": 0,
|
||||
"changeable": 0,
|
||||
"timeout": 60,
|
||||
"ext": "./json/zz.json"
|
||||
},
|
||||
{
|
||||
"key": "YYDSYS",
|
||||
"name": "🍡多多网盘┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShare",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"filterable": 0,
|
||||
"changeable": 0,
|
||||
"timeout": 60,
|
||||
"ext": "./json/yyds.json"
|
||||
},
|
||||
{
|
||||
"key": "QuarkTZ",
|
||||
"name": "✊️团长网盘┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebTz",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"filterable": 0,
|
||||
"changeable": 0
|
||||
},
|
||||
{
|
||||
"key": "cloudLJ",
|
||||
"name": "🐋天翼雷鲸┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShareCloudLJ",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 0,
|
||||
"filterable": 0,
|
||||
"changeable": 0,
|
||||
"style": {
|
||||
"type": "rect",
|
||||
"ratio": 1.433
|
||||
},
|
||||
"ext": "./json/lj.json"
|
||||
},
|
||||
{
|
||||
"key": "海绵",
|
||||
"name": "🧽海绵资源┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShareCloudHM",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1,
|
||||
"changeable": 1,
|
||||
"style": {
|
||||
"type": "list",
|
||||
"ratio": 1.433
|
||||
},
|
||||
"ext": "./json/hm.json"
|
||||
},
|
||||
{
|
||||
"key": "123",
|
||||
"name": "📅数字资源┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShareCloud123",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1,
|
||||
"changeable": 1,
|
||||
"style": {
|
||||
"type": "list",
|
||||
"ratio": 1.433
|
||||
},
|
||||
"ext": "./json/123.json"
|
||||
},
|
||||
{
|
||||
"key": "趣盘",
|
||||
"name": "🥳百度趣盘┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebQu",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1,
|
||||
"style": {
|
||||
"type": "list",
|
||||
"ratio": 1.433
|
||||
},
|
||||
"ext": {
|
||||
"url": "http://192.140.161.171:2563",
|
||||
"key1": "aassddwwxxllsx1x",
|
||||
"key2": "aassddwwxxllsx1x",
|
||||
"key3": "aassddwwxxllsx1x"
|
||||
"url": "https://www.qupanshe.com"
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "盘库",
|
||||
"name": "🐼盘库资源┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebKuBa",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1,
|
||||
"changeable": 1,
|
||||
"ext": {
|
||||
"url": "https://panku8.com,https://yipanso.com"
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "夸父",
|
||||
"name": "🏃➡️夸父资源┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShareCloudKF",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1,
|
||||
"changeable": 1,
|
||||
"style": {
|
||||
"type": "list",
|
||||
"ratio": 1.433
|
||||
},
|
||||
"ext": "./json/kf.json"
|
||||
},
|
||||
{
|
||||
"key": "爱影",
|
||||
"name": "🌀爱影┃超清┃弹幕",
|
||||
@@ -452,164 +603,6 @@
|
||||
"version": ""
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "玩偶哥哥",
|
||||
"name": "♨️玩偶备用┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShare",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1,
|
||||
"ext": "./json/wogg.json"
|
||||
},
|
||||
{
|
||||
"key": "MoggV2",
|
||||
"name": "🍁优汐木偶┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShare",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"quickSearch": 0,
|
||||
"filterable": 0,
|
||||
"ext": "./json/mogg.json"
|
||||
},
|
||||
{
|
||||
"key": "UcXmV2",
|
||||
"name": "🐂小二资源┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShare",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"filterable": 0,
|
||||
"changeable": 0,
|
||||
"timeout": 60,
|
||||
"ext": "./json/ex.json"
|
||||
},
|
||||
{
|
||||
"key": "QuarkLaBiV2",
|
||||
"name": "🖍︎蜡笔资源┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShare",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"filterable": 0,
|
||||
"changeable": 0,
|
||||
"timeout": 60,
|
||||
"ext": "./json/lb.json"
|
||||
},
|
||||
{
|
||||
"key": "QuarkzzV2",
|
||||
"name": "🏆️夸克至臻┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShare",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"filterable": 0,
|
||||
"changeable": 0,
|
||||
"timeout": 60,
|
||||
"ext": "./json/zz.json"
|
||||
},
|
||||
{
|
||||
"key": "YYDSYS",
|
||||
"name": "🍡多多网盘┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShare",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"filterable": 0,
|
||||
"changeable": 0,
|
||||
"timeout": 60,
|
||||
"ext": "./json/yyds.json"
|
||||
},
|
||||
{
|
||||
"key": "QuarkTZ",
|
||||
"name": "✊️团长网盘┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebTz",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"filterable": 0,
|
||||
"changeable": 0
|
||||
},
|
||||
{
|
||||
"key": "cloudLJ",
|
||||
"name": "🐋天翼雷鲸┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShareCloudLJ",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 0,
|
||||
"filterable": 0,
|
||||
"changeable": 0,
|
||||
"style": {
|
||||
"type": "rect",
|
||||
"ratio": 1.433
|
||||
},
|
||||
"ext": "./json/lj.json"
|
||||
},
|
||||
{
|
||||
"key": "海绵",
|
||||
"name": "🧽海绵资源┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShareCloudHM",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1,
|
||||
"changeable": 1,
|
||||
"style": {
|
||||
"type": "list",
|
||||
"ratio": 1.433
|
||||
},
|
||||
"ext": "./json/hm.json"
|
||||
},
|
||||
{
|
||||
"key": "123",
|
||||
"name": "📅数字资源┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebShareCloud123",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1,
|
||||
"changeable": 1,
|
||||
"style": {
|
||||
"type": "list",
|
||||
"ratio": 1.433
|
||||
},
|
||||
"ext": "./json/123.json"
|
||||
},
|
||||
{
|
||||
"key": "趣盘",
|
||||
"name": "🥳百度趣盘┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebQu",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1,
|
||||
"style": {
|
||||
"type": "list",
|
||||
"ratio": 1.433
|
||||
},
|
||||
"ext": {
|
||||
"url": "https://www.qupanshe.com"
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "盘库",
|
||||
"name": "🐼盘库资源┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_PanWebKuBa",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1,
|
||||
"changeable": 1,
|
||||
"ext": {
|
||||
"url": "https://panku8.com,https://yipanso.com"
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "瓜子影视",
|
||||
"name": "🍉瓜子┃蓝光┃无广",
|
||||
@@ -705,16 +698,6 @@
|
||||
"quickSearch": 1,
|
||||
"changeable": 0
|
||||
},
|
||||
{
|
||||
"key": "白白",
|
||||
"name": "🐭白白┃秒播┃纯净",
|
||||
"type": 3,
|
||||
"api": "csp_SbaibaiGuard",
|
||||
"playerType": 2,
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
"changeable": 1
|
||||
},
|
||||
{
|
||||
"key": "原创",
|
||||
"name": "☀原创┃不卡┃纯净",
|
||||
@@ -755,16 +738,6 @@
|
||||
"quickSearch": 1,
|
||||
"changeable": 1
|
||||
},
|
||||
{
|
||||
"key": "溢彩",
|
||||
"name": "💡溢彩┃秒播┃纯净",
|
||||
"type": 3,
|
||||
"api": "csp_AppSKGuard",
|
||||
"searchable": 1,
|
||||
"quickSearch": 0,
|
||||
"changeable": 0,
|
||||
"ext": "rfOb1uAWbkRHp7hdxprG9un3+TfN183v1zIyaYDoDAIaLw5L8Dp8+v88LrEL3dBzrmWbdMBX0WNm7HtkQuw0AIzUurGBVyPqCKzDmbriATuukhctJlsLo8KxCw=="
|
||||
},
|
||||
{
|
||||
"key": "Lib",
|
||||
"name": "🌟立播┃秒播┃纯净",
|
||||
@@ -782,7 +755,7 @@
|
||||
"key": "zxzj",
|
||||
"name": "🍊在线┃外剧┃纯净",
|
||||
"type": 3,
|
||||
"api": "csp_Zxzj",
|
||||
"api": "csp_ZxzjGuard",
|
||||
"timeout": 15,
|
||||
"searchable": 1,
|
||||
"quickSearch": 1,
|
||||
@@ -830,17 +803,6 @@
|
||||
"changeable": 1,
|
||||
"ext": "uqGL1bNENExT7/hGxpSE5qU="
|
||||
},
|
||||
{
|
||||
"key": "即看",
|
||||
"name": "🐻即看┃多线┃纯净",
|
||||
"type": 3,
|
||||
"api": "csp_AppSxGuard",
|
||||
"timeout": 10,
|
||||
"searchable": 1,
|
||||
"quickSearch": 0,
|
||||
"changeable": 0,
|
||||
"ext": "rfOX1voDIQhH8epBwtCFsub1+2maloq8lmJuL821WUsZJAZft2UtrrwhKK5Zxt1toWyFctBUmThhuDAjVuU="
|
||||
},
|
||||
{
|
||||
"key": "欢视",
|
||||
"name": "👓欢视┃多线┃纯净",
|
||||
@@ -884,20 +846,6 @@
|
||||
"type": "list"
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "664K",
|
||||
"name": "🌀沐风┃蓝光┃无广",
|
||||
"type": 3,
|
||||
"api": "csp_XBPQ",
|
||||
"ext": "http://我不是.摸鱼儿.com/api/moyu.php?file=664k"
|
||||
},
|
||||
{
|
||||
"key": "4Kdy",
|
||||
"name": "🕊️凝安┃蓝光┃无广",
|
||||
"type": 3,
|
||||
"api": "csp_XBPQ",
|
||||
"ext": "http://我不是.摸鱼儿.com/api/moyu.php?file=4kdy"
|
||||
},
|
||||
{
|
||||
"key": "88js",
|
||||
"name": "⚽ 88┃看球┃直播",
|
||||
@@ -1109,7 +1057,7 @@
|
||||
},
|
||||
{
|
||||
"key": "Biliych",
|
||||
"name": "🅱哔哔演唱会┃4K弹幕",
|
||||
"name": "🅱演唱会集┃4K弹幕",
|
||||
"type": 3,
|
||||
"api": "csp_BiliGuard",
|
||||
"style": {
|
||||
@@ -1201,6 +1149,20 @@
|
||||
"json": "http://我不是.摸鱼儿.com/api/moyu.php?file=高中课堂"
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "56动漫",
|
||||
"name": "🫠56┃动漫┃蓝光",
|
||||
"type": 3,
|
||||
"api": "./api/drpy2.min.js",
|
||||
"ext": "./js/56DM.js"
|
||||
},
|
||||
{
|
||||
"key": "NT动漫",
|
||||
"name": "🥶NT┃动漫┃蓝光",
|
||||
"type": 3,
|
||||
"api": "./api/drpy2.min.js",
|
||||
"ext": "./js/NTDM.js"
|
||||
},
|
||||
{
|
||||
"key": "曼波动漫",
|
||||
"name": "🍼曼波┃动漫┃蓝光",
|
||||
@@ -1216,6 +1178,22 @@
|
||||
"version": ""
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "稀饭动漫",
|
||||
"name": "🥣稀饭┃动漫┃蓝光",
|
||||
"type": 3,
|
||||
"quickSearch": 1,
|
||||
"api": "csp_AppGet",
|
||||
"jar": "./jars/config.jar",
|
||||
"ext": {
|
||||
"url": "",
|
||||
"site": "https://xfapp-1305390065.cos.ap-guangzhou.myqcloud.com/getapp.txt",
|
||||
"dataKey": "1yZ2Spn9krnzVKoC",
|
||||
"dataIv": "1yZ2Spn9krnzVKoC",
|
||||
"deviceId": "",
|
||||
"version": ""
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "咕咕动漫",
|
||||
"name": "🍚咕咕┃动漫┃蓝光",
|
||||
@@ -1239,7 +1217,8 @@
|
||||
"api": "csp_AppGet",
|
||||
"jar": "./jars/config.jar",
|
||||
"ext": {
|
||||
"url": "http://45.43.29.111:9527",
|
||||
"url": "https://get.mymifun.com",
|
||||
"site": "",
|
||||
"dataKey": "GETMIFUNGEIMIFUN",
|
||||
"dataIv": "GETMIFUNGEIMIFUN",
|
||||
"deviceId": "",
|
||||
@@ -1261,35 +1240,6 @@
|
||||
"version": "170"
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "黑猫动漫",
|
||||
"name": "🐈⬛黑猫┃动漫┃蓝光",
|
||||
"type": 3,
|
||||
"quickSearch": 1,
|
||||
"api": "csp_AppGet",
|
||||
"jar": "./jars/config.jar",
|
||||
"ext": {
|
||||
"url": "https://dm.xxdm123.top:9991",
|
||||
"dataKey": "0fe3b5781782c621",
|
||||
"dataIv": "0fe3b5781782c621",
|
||||
"deviceId": "",
|
||||
"version": "203"
|
||||
}
|
||||
},
|
||||
{
|
||||
"key": "樱花",
|
||||
"name": "🌸樱花┃动漫┃樱花",
|
||||
"type": 3,
|
||||
"api": "csp_XBPQ",
|
||||
"ext": "http://我不是.摸鱼儿.com/api/moyu.php?file=樱花动漫"
|
||||
},
|
||||
{
|
||||
"key": "巴士动漫",
|
||||
"name": "🚎巴士┃动漫┃樱花",
|
||||
"type": 3,
|
||||
"api": "csp_XYQHiker",
|
||||
"ext": "http://我不是.摸鱼儿.com/api/moyu.php?file=巴士"
|
||||
},
|
||||
{
|
||||
"key": "duanju",
|
||||
"name": "🌟星芽┃短剧┃热推",
|
||||
@@ -1300,6 +1250,26 @@
|
||||
"quickSearch": 0,
|
||||
"filterable": 0
|
||||
},
|
||||
{
|
||||
"key": "甜圈短剧",
|
||||
"name": "🍩甜圈┃短剧┃热推",
|
||||
"type": 3,
|
||||
"api": "./api/TQDJ.py",
|
||||
"searchable": 1,
|
||||
"changeable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1
|
||||
},
|
||||
{
|
||||
"key": "剧王短剧",
|
||||
"name": "剧王┃短剧┃热推",
|
||||
"type": 3,
|
||||
"api": "./api/JWDJ.py",
|
||||
"searchable": 1,
|
||||
"changeable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1
|
||||
},
|
||||
{
|
||||
"key": "河马短剧",
|
||||
"name": "🦛河马┃短剧┃热推",
|
||||
@@ -1312,15 +1282,11 @@
|
||||
"playerType": 2
|
||||
},
|
||||
{
|
||||
"key": "偷乐短剧",
|
||||
"name": "🤣偷乐┃短剧┃热推",
|
||||
"key": "本地",
|
||||
"name": "📁文件┃本地┃资源",
|
||||
"type": 3,
|
||||
"api": "./api/偷乐短剧.py",
|
||||
"searchable": 1,
|
||||
"changeable": 1,
|
||||
"quickSearch": 1,
|
||||
"filterable": 1,
|
||||
"playerType": 2
|
||||
"jar": "./jars/config.jar",
|
||||
"api": "csp_LocalFile"
|
||||
},
|
||||
{
|
||||
"key": "QuarkYunPan",
|
||||
@@ -1385,13 +1351,11 @@
|
||||
"key": "push_agent",
|
||||
"name": "🛴手机┃推送┃链接",
|
||||
"type": 3,
|
||||
"api": "csp_PushGuard",
|
||||
"api": "csp_Push",
|
||||
"jar": "./jars/config.jar",
|
||||
"searchable": 0,
|
||||
"quickSearch": 0,
|
||||
"ext": {
|
||||
"Cloud-drive": "http://127.0.0.1:9978/file/TVBox/Cloud-drive.txt",
|
||||
"from": "4k|auto"
|
||||
}
|
||||
"filterable": 0,
|
||||
"changeable": 0
|
||||
}
|
||||
],
|
||||
"parses": [
|
||||
|
||||
329
摸鱼儿/api/JWDJ.py
Normal file
329
摸鱼儿/api/JWDJ.py
Normal file
@@ -0,0 +1,329 @@
|
||||
# coding=utf-8
|
||||
# !/usr/bin/python
|
||||
|
||||
"""
|
||||
|
||||
作者 丢丢喵推荐 🚓 内容均从互联网收集而来 仅供交流学习使用 版权归原创者所有 如侵犯了您的权益 请通知作者 将及时删除侵权内容
|
||||
====================Diudiumiao====================
|
||||
|
||||
"""
|
||||
|
||||
from Crypto.Util.Padding import unpad
|
||||
from Crypto.Util.Padding import pad
|
||||
from urllib.parse import unquote
|
||||
from Crypto.Cipher import ARC4
|
||||
from urllib.parse import quote
|
||||
from base.spider import Spider
|
||||
from Crypto.Cipher import AES
|
||||
from datetime import datetime
|
||||
from bs4 import BeautifulSoup
|
||||
from base64 import b64decode
|
||||
import urllib.request
|
||||
import urllib.parse
|
||||
import datetime
|
||||
import binascii
|
||||
import requests
|
||||
import base64
|
||||
import json
|
||||
import time
|
||||
import sys
|
||||
import re
|
||||
import os
|
||||
|
||||
sys.path.append('..')
|
||||
|
||||
xurl = "https://djw1.com"
|
||||
|
||||
headerx = {
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'
|
||||
}
|
||||
|
||||
class Spider(Spider):
|
||||
global xurl
|
||||
global headerx
|
||||
|
||||
def getName(self):
|
||||
return "首页"
|
||||
|
||||
def init(self, extend):
|
||||
pass
|
||||
|
||||
def isVideoFormat(self, url):
|
||||
pass
|
||||
|
||||
def manualVideoCheck(self):
|
||||
pass
|
||||
|
||||
def extract_middle_text(self, text, start_str, end_str, pl, start_index1: str = '', end_index2: str = ''):
|
||||
if pl == 3:
|
||||
plx = []
|
||||
while True:
|
||||
start_index = text.find(start_str)
|
||||
if start_index == -1:
|
||||
break
|
||||
end_index = text.find(end_str, start_index + len(start_str))
|
||||
if end_index == -1:
|
||||
break
|
||||
middle_text = text[start_index + len(start_str):end_index]
|
||||
plx.append(middle_text)
|
||||
text = text.replace(start_str + middle_text + end_str, '')
|
||||
if len(plx) > 0:
|
||||
purl = ''
|
||||
for i in range(len(plx)):
|
||||
matches = re.findall(start_index1, plx[i])
|
||||
output = ""
|
||||
for match in matches:
|
||||
match3 = re.search(r'(?:^|[^0-9])(\d+)(?:[^0-9]|$)', match[1])
|
||||
if match3:
|
||||
number = match3.group(1)
|
||||
else:
|
||||
number = 0
|
||||
if 'http' not in match[0]:
|
||||
output += f"#{match[1]}${number}{xurl}{match[0]}"
|
||||
else:
|
||||
output += f"#{match[1]}${number}{match[0]}"
|
||||
output = output[1:]
|
||||
purl = purl + output + "$$$"
|
||||
purl = purl[:-3]
|
||||
return purl
|
||||
else:
|
||||
return ""
|
||||
else:
|
||||
start_index = text.find(start_str)
|
||||
if start_index == -1:
|
||||
return ""
|
||||
end_index = text.find(end_str, start_index + len(start_str))
|
||||
if end_index == -1:
|
||||
return ""
|
||||
|
||||
if pl == 0:
|
||||
middle_text = text[start_index + len(start_str):end_index]
|
||||
return middle_text.replace("\\", "")
|
||||
|
||||
if pl == 1:
|
||||
middle_text = text[start_index + len(start_str):end_index]
|
||||
matches = re.findall(start_index1, middle_text)
|
||||
if matches:
|
||||
jg = ' '.join(matches)
|
||||
return jg
|
||||
|
||||
if pl == 2:
|
||||
middle_text = text[start_index + len(start_str):end_index]
|
||||
matches = re.findall(start_index1, middle_text)
|
||||
if matches:
|
||||
new_list = [f'{item}' for item in matches]
|
||||
jg = '$$$'.join(new_list)
|
||||
return jg
|
||||
|
||||
def homeContent(self, filter):
|
||||
result = {"class": []}
|
||||
|
||||
detail = requests.get(url=xurl + "/all/", headers=headerx)
|
||||
detail.encoding = "utf-8"
|
||||
res = detail.text
|
||||
|
||||
doc = BeautifulSoup(res, "lxml")
|
||||
|
||||
soups = doc.find_all('section', class_="container items")
|
||||
|
||||
for soup in soups:
|
||||
vods = soup.find_all('li')
|
||||
|
||||
for vod in vods:
|
||||
|
||||
id = vod.find('a')['href']
|
||||
|
||||
name = vod.text.strip()
|
||||
|
||||
result["class"].append({"type_id": id, "type_name": "" + name})
|
||||
|
||||
return result
|
||||
|
||||
def homeVideoContent(self):
|
||||
pass
|
||||
|
||||
def categoryContent(self, cid, pg, filter, ext):
|
||||
result = {}
|
||||
videos = []
|
||||
|
||||
if pg:
|
||||
page = int(pg)
|
||||
else:
|
||||
page = 1
|
||||
|
||||
url = f'{cid}page/{str(page)}/'
|
||||
detail = requests.get(url=url, headers=headerx)
|
||||
detail.encoding = "utf-8"
|
||||
res = detail.text
|
||||
doc = BeautifulSoup(res, "lxml")
|
||||
|
||||
soups = doc.find_all('section', class_="container items")
|
||||
|
||||
for soup in soups:
|
||||
vods = soup.find_all('li')
|
||||
|
||||
for vod in vods:
|
||||
|
||||
name = vod.find('img')['alt']
|
||||
|
||||
ids = vod.find('a', class_="image-line")
|
||||
id = ids['href']
|
||||
|
||||
pic = vod.find('img')['src']
|
||||
|
||||
remark = self.extract_middle_text(str(vod), 'class="remarks light">', '<', 0)
|
||||
|
||||
video = {
|
||||
"vod_id": id,
|
||||
"vod_name": name,
|
||||
"vod_pic": pic,
|
||||
"vod_remarks": '▶️' + remark
|
||||
}
|
||||
videos.append(video)
|
||||
|
||||
result = {'list': videos}
|
||||
result['page'] = pg
|
||||
result['pagecount'] = 9999
|
||||
result['limit'] = 90
|
||||
result['total'] = 999999
|
||||
return result
|
||||
|
||||
def detailContent(self, ids):
|
||||
did = ids[0]
|
||||
result = {}
|
||||
videos = []
|
||||
xianlu = ''
|
||||
bofang = ''
|
||||
|
||||
if 'http' not in did:
|
||||
did = xurl + did
|
||||
|
||||
res = requests.get(url=did, headers=headerx)
|
||||
res.encoding = "utf-8"
|
||||
res = res.text
|
||||
doc = BeautifulSoup(res, "lxml")
|
||||
|
||||
url = 'https://fs-im-kefu.7moor-fs1.com/ly/4d2c3f00-7d4c-11e5-af15-41bf63ae4ea0/1732707176882/jiduo.txt'
|
||||
response = requests.get(url)
|
||||
response.encoding = 'utf-8'
|
||||
code = response.text
|
||||
name = self.extract_middle_text(code, "s1='", "'", 0)
|
||||
Jumps = self.extract_middle_text(code, "s2='", "'", 0)
|
||||
|
||||
content = '摸鱼:不带脑子爽就完了!📢' + self.extract_middle_text(res,'class="info-detail">','<', 0)
|
||||
|
||||
remarks = self.extract_middle_text(res, 'class="info-mark">', '<', 0)
|
||||
|
||||
year = self.extract_middle_text(res, 'class="info-addtime">', '<', 0)
|
||||
|
||||
if name not in content:
|
||||
bofang = Jumps
|
||||
xianlu = '1'
|
||||
else:
|
||||
soups = doc.find('div', class_="ep-list-items")
|
||||
|
||||
soup = soups.find_all('a')
|
||||
|
||||
for sou in soup:
|
||||
|
||||
id = sou['href']
|
||||
|
||||
name = sou.text.strip()
|
||||
|
||||
bofang = bofang + name + '$' + id + '#'
|
||||
|
||||
bofang = bofang[:-1]
|
||||
|
||||
xianlu = '专线'
|
||||
|
||||
videos.append({
|
||||
"vod_id": did,
|
||||
"vod_remarks": remarks,
|
||||
"vod_year": year,
|
||||
"vod_content": content,
|
||||
"vod_play_from": xianlu,
|
||||
"vod_play_url": bofang
|
||||
})
|
||||
|
||||
result['list'] = videos
|
||||
return result
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
|
||||
|
||||
res = requests.get(url=id, headers=headerx)
|
||||
res.encoding = "utf-8"
|
||||
res = res.text
|
||||
|
||||
url = self.extract_middle_text(res, '"wwm3u8":"', '"', 0).replace('\\', '')
|
||||
|
||||
result = {}
|
||||
result["parse"] = 0
|
||||
result["playUrl"] = ''
|
||||
result["url"] = url
|
||||
result["header"] = headerx
|
||||
return result
|
||||
|
||||
def searchContentPage(self, key, quick, pg):
|
||||
result = {}
|
||||
videos = []
|
||||
|
||||
if pg:
|
||||
page = int(pg)
|
||||
else:
|
||||
page = 1
|
||||
|
||||
url = f'{xurl}/search/{key}/page/{str(page)}/'
|
||||
detail = requests.get(url=url, headers=headerx)
|
||||
detail.encoding = "utf-8"
|
||||
res = detail.text
|
||||
doc = BeautifulSoup(res, "lxml")
|
||||
|
||||
soups = doc.find_all('section', class_="container items")
|
||||
|
||||
for soup in soups:
|
||||
vods = soup.find_all('li')
|
||||
|
||||
for vod in vods:
|
||||
|
||||
name = vod.find('img')['alt']
|
||||
|
||||
ids = vod.find('a', class_="image-line")
|
||||
id = ids['href']
|
||||
|
||||
pic = vod.find('img')['src']
|
||||
|
||||
remark = self.extract_middle_text(str(vod), 'class="remarks light">', '<', 0)
|
||||
|
||||
video = {
|
||||
"vod_id": id,
|
||||
"vod_name": name,
|
||||
"vod_pic": pic,
|
||||
"vod_remarks": '▶️' + remark
|
||||
}
|
||||
videos.append(video)
|
||||
|
||||
result['list'] = videos
|
||||
result['page'] = pg
|
||||
result['pagecount'] = 9999
|
||||
result['limit'] = 90
|
||||
result['total'] = 999999
|
||||
return result
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
|
||||
return self.searchContentPage(key, quick, '1')
|
||||
|
||||
def localProxy(self, params):
|
||||
if params['type'] == "m3u8":
|
||||
return self.proxyM3u8(params)
|
||||
elif params['type'] == "media":
|
||||
return self.proxyMedia(params)
|
||||
elif params['type'] == "ts":
|
||||
return self.proxyTs(params)
|
||||
return None
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
156
摸鱼儿/api/TQDJ.py
Normal file
156
摸鱼儿/api/TQDJ.py
Normal file
@@ -0,0 +1,156 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import sys
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
|
||||
pass
|
||||
|
||||
def getName(self):
|
||||
return "甜圈短剧"
|
||||
|
||||
def isVideoFormat(self, url):
|
||||
return True
|
||||
|
||||
def manualVideoCheck(self):
|
||||
return False
|
||||
|
||||
def destroy(self):
|
||||
pass
|
||||
|
||||
# 更新为新的域名
|
||||
ahost = 'https://mov.cenguigui.cn'
|
||||
|
||||
headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
|
||||
'sec-ch-ua-platform': '"macOS"',
|
||||
'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
|
||||
'DNT': '1',
|
||||
'sec-ch-ua-mobile': '?0',
|
||||
'Sec-Fetch-Site': 'cross-site',
|
||||
'Sec-Fetch-Mode': 'no-cors',
|
||||
'Sec-Fetch-Dest': 'video',
|
||||
'Sec-Fetch-Storage-Access': 'active',
|
||||
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
|
||||
}
|
||||
|
||||
def homeContent(self, filter):
|
||||
result = {'class': [{'type_id': '推荐榜', 'type_name': '🔥 推荐榜'},
|
||||
{'type_id': '新剧', 'type_name': '🎬 新剧'},
|
||||
{'type_id': '逆袭', 'type_name': '🎬 逆袭'},
|
||||
{'type_id': '霸总', 'type_name': '🎬 霸总'},
|
||||
{'type_id': '现代言情', 'type_name': '🎬 现代言情'},
|
||||
{'type_id': '打脸虐渣', 'type_name': '🎬 打脸虐渣'},
|
||||
{'type_id': '豪门恩怨', 'type_name': '🎬 豪门恩怨'},
|
||||
{'type_id': '神豪', 'type_name': '🎬 神豪'},
|
||||
{'type_id': '马甲', 'type_name': '🎬 马甲'},
|
||||
{'type_id': '都市日常', 'type_name': '🎬 都市日常'},
|
||||
{'type_id': '战神归来', 'type_name': '🎬 战神归来'},
|
||||
{'type_id': '小人物', 'type_name': '🎬 小人物'},
|
||||
{'type_id': '女性成长', 'type_name': '🎬 女性成长'},
|
||||
{'type_id': '大女主', 'type_name': '🎬 大女主'},
|
||||
{'type_id': '穿越', 'type_name': '🎬 穿越'},
|
||||
{'type_id': '都市修仙', 'type_name': '🎬 都市修仙'},
|
||||
{'type_id': '强者回归', 'type_name': '🎬 强者回归'},
|
||||
{'type_id': '亲情', 'type_name': '🎬 亲情'},
|
||||
{'type_id': '古装', 'type_name': '🎬 古装'},
|
||||
{'type_id': '重生', 'type_name': '🎬 重生'},
|
||||
{'type_id': '闪婚', 'type_name': '🎬 闪婚'},
|
||||
{'type_id': '赘婿逆袭', 'type_name': '🎬 赘婿逆袭'},
|
||||
{'type_id': '虐恋', 'type_name': '🎬 虐恋'},
|
||||
{'type_id': '追妻', 'type_name': '🎬 追妻'},
|
||||
{'type_id': '天下无敌', 'type_name': '🎬 天下无敌'},
|
||||
{'type_id': '家庭伦理', 'type_name': '🎬 家庭伦理'},
|
||||
{'type_id': '萌宝', 'type_name': '🎬 萌宝'},
|
||||
{'type_id': '古风权谋', 'type_name': '🎬 古风权谋'},
|
||||
{'type_id': '职场', 'type_name': '🎬 职场'},
|
||||
{'type_id': '奇幻脑洞', 'type_name': '🎬 奇幻脑洞'},
|
||||
{'type_id': '异能', 'type_name': '🎬 异能'},
|
||||
{'type_id': '无敌神医', 'type_name': '🎬 无敌神医'},
|
||||
{'type_id': '古风言情', 'type_name': '🎬 古风言情'},
|
||||
{'type_id': '传承觉醒', 'type_name': '🎬 传承觉醒'},
|
||||
{'type_id': '现言甜宠', 'type_name': '🎬 现言甜宠'},
|
||||
{'type_id': '奇幻爱情', 'type_name': '🎬 奇幻爱情'},
|
||||
{'type_id': '乡村', 'type_name': '🎬 乡村'},
|
||||
{'type_id': '历史古代', 'type_name': '🎬 历史古代'},
|
||||
{'type_id': '王妃', 'type_name': '🎬 王妃'},
|
||||
{'type_id': '高手下山', 'type_name': '🎬 高手下山'},
|
||||
{'type_id': '娱乐圈', 'type_name': '🎬 娱乐圈'},
|
||||
{'type_id': '强强联合', 'type_name': '🎬 强强联合'},
|
||||
{'type_id': '破镜重圆', 'type_name': '🎬 破镜重圆'},
|
||||
{'type_id': '暗恋成真', 'type_name': '🎬 暗恋成真'},
|
||||
{'type_id': '民国', 'type_name': '🎬 民国'},
|
||||
{'type_id': '欢喜冤家', 'type_name': '🎬 欢喜冤家'},
|
||||
{'type_id': '系统', 'type_name': '🎬 系统'},
|
||||
{'type_id': '真假千金', 'type_name': '🎬 真假千金'},
|
||||
{'type_id': '龙王', 'type_name': '🎬 龙王'},
|
||||
{'type_id': '校园', 'type_name': '🎬 校园'},
|
||||
{'type_id': '穿书', 'type_name': '🎬 穿书'},
|
||||
{'type_id': '女帝', 'type_name': '🎬 女帝'},
|
||||
{'type_id': '团宠', 'type_name': '🎬 团宠'},
|
||||
{'type_id': '年代爱情', 'type_name': '🎬 年代爱情'},
|
||||
{'type_id': '玄幻仙侠', 'type_name': '🎬 玄幻仙侠'},
|
||||
{'type_id': '青梅竹马', 'type_name': '🎬 青梅竹马'},
|
||||
{'type_id': '悬疑推理', 'type_name': '🎬 悬疑推理'},
|
||||
{'type_id': '皇后', 'type_name': '🎬 皇后'},
|
||||
{'type_id': '替身', 'type_name': '🎬 替身'},
|
||||
{'type_id': '大叔', 'type_name': '🎬 大叔'},
|
||||
{'type_id': '喜剧', 'type_name': '🎬 喜剧'},
|
||||
{'type_id': '剧情', 'type_name': '🎬 剧情'}]}
|
||||
return result
|
||||
|
||||
def homeVideoContent(self):
|
||||
return []
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
|
||||
params = {
|
||||
'classname': tid,
|
||||
'offset': str((int(pg) - 1)),
|
||||
}
|
||||
# 更新请求路径为 /duanju/api.php
|
||||
data = self.fetch(f'{self.ahost}/duanju/api.php', params=params, headers=self.headers).json()
|
||||
videos = []
|
||||
for k in data['data']:
|
||||
videos.append({
|
||||
'vod_id': k.get('book_id'),
|
||||
'vod_name': k.get('title'),
|
||||
'vod_pic': k.get('cover'),
|
||||
'vod_year': k.get('score'),
|
||||
'vod_remarks': f"{k.get('sub_title')}|{k.get('episode_cnt')}"
|
||||
})
|
||||
result = {}
|
||||
result['list'] = videos
|
||||
result['page'] = pg
|
||||
result['pagecount'] = 9999
|
||||
result['limit'] = 90
|
||||
result['total'] = 999999
|
||||
return result
|
||||
|
||||
def detailContent(self, ids):
|
||||
# 更新请求路径为 /duanju/api.php
|
||||
v = self.fetch(f'{self.ahost}/duanju/api.php', params={'book_id': ids[0]}, headers=self.headers).json()
|
||||
vod = {
|
||||
'vod_id': ids[0],
|
||||
'vod_name': v.get('title'),
|
||||
'type_name': v.get('category'),
|
||||
'vod_year': v.get('time'),
|
||||
'vod_remarks': v.get('duration'),
|
||||
'vod_content': v.get('desc'),
|
||||
'vod_play_from': '爱看短剧',
|
||||
'vod_play_url': '#'.join([f"{i['title']}${i['video_id']}" for i in v['data']])
|
||||
}
|
||||
return {'list': [vod]}
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
|
||||
return self.categoryContent(key, pg, True, {})
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
|
||||
# 更新请求路径为 /duanju/api.php
|
||||
data = self.fetch(f'{self.ahost}/duanju/api.php', params={'video_id': id}, headers=self.headers).json()
|
||||
return {'parse': 0, 'url': data['data']['url'], 'header': self.headers}
|
||||
|
||||
def localProxy(self, param):
|
||||
pass
|
||||
790
摸鱼儿/api/偷乐短剧.py
790
摸鱼儿/api/偷乐短剧.py
@@ -1,790 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# 偷乐短剧爬虫
|
||||
|
||||
import sys
|
||||
import json
|
||||
import re
|
||||
import time
|
||||
import urllib.parse
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
# 导入基础类
|
||||
sys.path.append('../../')
|
||||
try:
|
||||
from base.spider import Spider
|
||||
except ImportError:
|
||||
# 本地调试时的替代实现
|
||||
class Spider:
|
||||
def init(self, extend=""):
|
||||
pass
|
||||
|
||||
class Spider(Spider):
|
||||
def __init__(self):
|
||||
# 网站主URL
|
||||
self.siteUrl = "https://www.toule.top"
|
||||
|
||||
# 根据网站实际结构,分类链接格式为: /index.php/vod/show/class/分类名/id/1.html
|
||||
# 分类ID映射 - 从网站中提取的分类
|
||||
self.cateManual = {
|
||||
"男频": "/index.php/vod/show/class/%E7%94%B7%E9%A2%91/id/1.html",
|
||||
"女频": "/index.php/vod/show/class/%E5%A5%B3%E9%A2%91/id/1.html",
|
||||
"都市": "/index.php/vod/show/class/%E9%83%BD%E5%B8%82/id/1.html",
|
||||
"赘婿": "/index.php/vod/show/class/%E8%B5%98%E5%A9%BF/id/1.html",
|
||||
"战神": "/index.php/vod/show/class/%E6%88%98%E7%A5%9E/id/1.html",
|
||||
"古代言情": "/index.php/vod/show/class/%E5%8F%A4%E4%BB%A3%E8%A8%80%E6%83%85/id/1.html",
|
||||
"现代言情": "/index.php/vod/show/class/%E7%8E%B0%E4%BB%A3%E8%A8%80%E6%83%85/id/1.html",
|
||||
"历史": "/index.php/vod/show/class/%E5%8E%86%E5%8F%B2/id/1.html",
|
||||
"玄幻": "/index.php/vod/show/class/%E7%8E%84%E5%B9%BB/id/1.html",
|
||||
"搞笑": "/index.php/vod/show/class/%E6%90%9E%E7%AC%91/id/1.html",
|
||||
"甜宠": "/index.php/vod/show/class/%E7%94%9C%E5%AE%A0/id/1.html",
|
||||
"励志": "/index.php/vod/show/class/%E5%8A%B1%E5%BF%97/id/1.html",
|
||||
"逆袭": "/index.php/vod/show/class/%E9%80%86%E8%A2%AD/id/1.html",
|
||||
"穿越": "/index.php/vod/show/class/%E7%A9%BF%E8%B6%8A/id/1.html",
|
||||
"古装": "/index.php/vod/show/class/%E5%8F%A4%E8%A3%85/id/1.html"
|
||||
}
|
||||
|
||||
# 请求头
|
||||
self.headers = {
|
||||
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
|
||||
"Referer": "https://www.toule.top/",
|
||||
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
|
||||
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
|
||||
"Accept-Encoding": "gzip, deflate, br",
|
||||
"Connection": "keep-alive",
|
||||
}
|
||||
|
||||
|
||||
# 缓存
|
||||
self.cache = {}
|
||||
self.cache_timeout = {}
|
||||
|
||||
def getName(self):
    # Human-readable source name shown in the client UI.
    return "偷乐短剧"
def init(self, extend=""):
    # All configuration happens in __init__; nothing to do here.
    pass
def isVideoFormat(self, url):
    """判断是否为视频格式 — True when the URL contains a known media extension."""
    known_exts = ('.mp4', '.m3u8', '.ts', '.flv', '.avi', '.mkv', '.mov', '.rmvb', '.3gp')
    lowered = url.lower()
    # NOTE: substring match (as in the original), not a suffix match.
    return any(ext in lowered for ext in known_exts)
def manualVideoCheck(self):
    """是否需要手动检查视频 — no manual sniffing needed for this source."""
    return False
# 工具方法 - 网络请求
def fetch(self, url, headers=None, data=None, method="GET"):
    """统一的网络请求方法.

    GET sends `data` as query parameters, any other method POSTs it as
    form data. Returns the `requests` Response on success, or None on
    any failure (logged). TLS verification is disabled (verify=False),
    matching the site's certificate situation.
    """
    try:
        if headers is None:
            headers = self.headers.copy()

        if method.upper() == "GET":
            resp = requests.get(url, headers=headers, params=data,
                                timeout=10, verify=False)
        else:
            resp = requests.post(url, headers=headers, data=data,
                                 timeout=10, verify=False)

        resp.raise_for_status()
        # Fall back to UTF-8 when charset detection yields nothing.
        resp.encoding = resp.apparent_encoding or 'utf-8'
        return resp
    except Exception as e:
        self.log(f"请求失败: {url}, 错误: {str(e)}", "ERROR")
        return None
# 缓存方法
def getCache(self, key, timeout=3600):
    """获取缓存数据 — returns None when absent or expired; expired entries are evicted."""
    expires_at = self.cache_timeout.get(key)
    if key in self.cache and expires_at is not None:
        if time.time() < expires_at:
            return self.cache[key]
        # Expired: drop both the value and its deadline.
        del self.cache[key]
        del self.cache_timeout[key]
    return None
def setCache(self, key, value, timeout=3600):
    """设置缓存数据 — store `value` with an expiry `timeout` seconds from now."""
    self.cache[key] = value
    self.cache_timeout[key] = time.time() + timeout
# 日志方法
def log(self, msg, level='INFO'):
    """记录日志 — print a timestamped message when `level` meets the threshold."""
    severity = {'DEBUG': 0, 'INFO': 1, 'WARNING': 2, 'ERROR': 3}
    threshold = 'INFO'  # lower to 'DEBUG' for more verbose output
    # Unknown levels map to 4 and therefore always print.
    if severity.get(level, 4) >= severity.get(threshold, 1):
        print(f"[{level}] {time.strftime('%Y-%m-%d %H:%M:%S')} - {msg}")
# 辅助方法 - 从URL中提取视频ID
def extractVodId(self, url):
    """从URL中提取视频ID.

    e.g. /index.php/vod/play/id/9024/sid/1/nid/1.html -> "9024";
    returns "" when no /id/<digits>/ segment is present.
    """
    found = re.search(r'/id/(\d+)/', url)
    return found.group(1) if found else ""
# 辅助方法 - 从网页内容中提取分类
def extractCategories(self, text):
    """从网页内容中提取分类标签.

    Splits a tag string like "男频,逆袭,亲情,短剧" on ASCII commas,
    trims whitespace, and drops empties and the generic "短剧" tag.
    Returns [] when the text contains no comma at all.
    """
    if "," not in text:
        return []
    stripped = (part.strip() for part in text.split(","))
    return [p for p in stripped if p and p != "短剧"]
# 主要接口实现
def homeContent(self, filter):
    """获取首页分类及内容.

    Returns {'class': [...], 'list': [...]} — the category list (built
    from self.cateManual and cached for 24h) plus home-page videos.
    """
    cache_key = 'home_classes'
    classes = self.getCache(cache_key)
    if not classes:
        # The full listing path doubles as the type_id.
        classes = [{'type_id': path, 'type_name': name}
                   for name, path in self.cateManual.items()]
        self.setCache(cache_key, classes, 24 * 3600)  # cache for 24 hours

    result = {'class': classes}
    # Home-page recommendations.
    result['list'] = self.homeVideoContent().get('list', [])
    return result
def homeVideoContent(self):
    """获取首页推荐视频内容.

    Scrapes the "最新更新" section of the home page and returns
    {'list': [...]} of vod dicts; scraped results are cached for 1 hour.
    On any failure an empty list is returned.
    """
    cache_key = 'home_videos'
    cached_videos = self.getCache(cache_key)
    if cached_videos:
        return {'list': cached_videos}

    def parse_item(item):
        # Turn one <li class="item"> into a vod dict, or None when a
        # required piece (title / link / id) is missing.
        title_tag = item.find('h3')
        if not title_tag:
            return None
        title = title_tag.text.strip()

        link_tag = item.find('a')
        if not link_tag:
            return None
        link = link_tag.get('href', '')
        if not link.startswith('http'):
            link = urllib.parse.urljoin(self.siteUrl, link)

        vid = self.extractVodId(link)
        if not vid:
            return None

        img_tag = item.find('img')
        img_url = ""
        if img_tag:
            # Lazy-loaded images keep the real URL in data-src.
            img_url = img_tag.get('src', img_tag.get('data-src', ''))
            if img_url and not img_url.startswith('http'):
                img_url = urllib.parse.urljoin(self.siteUrl, img_url)

        remarks_tag = item.find('span', class_='remarks')
        remarks = remarks_tag.text.strip() if remarks_tag else ""
        tags_tag = item.find('span', class_='tags')
        tags = tags_tag.text.strip() if tags_tag else ""
        # Merge status text and tag text into one remarks string.
        if remarks and tags:
            remarks = f"{remarks} | {tags}"
        elif tags:
            remarks = tags

        return {'vod_id': vid, 'vod_name': title,
                'vod_pic': img_url, 'vod_remarks': remarks}

    videos = []
    try:
        response = self.fetch(self.siteUrl)
        if response and response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')

            # Items live in the parent container of the "最新更新" heading.
            heading = soup.find('h2', text=lambda t: t and '最新更新' in t)
            container = heading.parent if heading else None
            if container:
                for item in container.find_all('li', class_='item'):
                    try:
                        vod = parse_item(item)
                        if vod:
                            videos.append(vod)
                    except Exception as e:
                        self.log(f"处理视频项时出错: {str(e)}", "ERROR")
                        continue

            self.setCache(cache_key, videos, 3600)  # cache for 1 hour
    except Exception as e:
        self.log(f"获取首页视频内容发生错误: {str(e)}", "ERROR")

    return {'list': videos}
def categoryContent(self, tid, pg, filter, extend):
    """获取分类内容.

    `tid` is normally the full listing path (it is used as type_id in
    homeContent); a bare category name is also accepted and resolved
    through self.cateManual. Returns the standard CMS page dict with
    keys list / page / pagecount / limit / total.
    """
    result = {}
    videos = []

    # Normalize the page number.
    pg = 1 if pg is None else int(pg)

    if tid.startswith("/"):
        # tid is already a URL path; splice the page number in, e.g.
        # /index.php/vod/show/class/男频/id/1.html -> .../id/1/page/2.html
        if pg > 1:
            if "html" in tid:
                category_url = tid.replace(".html", f"/page/{pg}.html")
            else:
                category_url = f"{tid}/page/{pg}.html"
        else:
            category_url = tid
        full_url = urllib.parse.urljoin(self.siteUrl, category_url)
    else:
        # Legacy form: tid is a category name; map it back to a path.
        category_url = ""
        for name, url in self.cateManual.items():
            if name == tid:
                category_url = url
                break

        if not category_url:
            self.log(f"未找到分类ID对应的URL: {tid}", "ERROR")
            result['list'] = []
            result['page'] = pg
            result['pagecount'] = 1
            result['limit'] = 0
            result['total'] = 0
            return result

        if pg > 1:
            if "html" in category_url:
                category_url = category_url.replace(".html", f"/page/{pg}.html")
            else:
                category_url = f"{category_url}/page/{pg}.html"
        full_url = urllib.parse.urljoin(self.siteUrl, category_url)

    # BUG FIX: these defaults must exist even when the request below
    # fails (fetch returns None / bad status); previously they were
    # only assigned inside the success branch, so the result assembly
    # after the try raised an uncaught NameError.
    total = 0
    pagecount = 1
    limit = 20

    try:
        response = self.fetch(full_url)
        if response and response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')

            for item in soup.find_all('li', class_='item'):
                try:
                    title_tag = item.find('h3')
                    if not title_tag:
                        continue
                    title = title_tag.text.strip()

                    link_tag = item.find('a')
                    if not link_tag:
                        continue
                    link = link_tag.get('href', '')
                    if not link.startswith('http'):
                        link = urllib.parse.urljoin(self.siteUrl, link)

                    vid = self.extractVodId(link)
                    if not vid:
                        continue

                    img_tag = item.find('img')
                    img_url = ""
                    if img_tag:
                        # Lazy-loaded images keep the real URL in data-src.
                        img_url = img_tag.get('src', img_tag.get('data-src', ''))
                        if img_url and not img_url.startswith('http'):
                            img_url = urllib.parse.urljoin(self.siteUrl, img_url)

                    remarks_tag = item.find('span', class_='remarks')
                    remarks = remarks_tag.text.strip() if remarks_tag else ""
                    tags_tag = item.find('span', class_='tags')
                    tags = tags_tag.text.strip() if tags_tag else ""
                    # Merge status text and tag text into one remarks string.
                    if remarks and tags:
                        remarks = f"{remarks} | {tags}"
                    elif tags:
                        remarks = tags

                    videos.append({
                        'vod_id': vid,
                        'vod_name': title,
                        'vod_pic': img_url,
                        'vod_remarks': remarks
                    })
                except Exception as e:
                    self.log(f"处理分类视频项时出错: {str(e)}", "ERROR")
                    continue

            total = len(videos)
            # Derive the page count from the numeric links in the pager.
            pagination = soup.find('ul', class_='page')
            if pagination:
                for link in pagination.find_all('a'):
                    page_text = link.text.strip()
                    if page_text.isdigit():
                        pagecount = max(pagecount, int(page_text))
    except Exception as e:
        self.log(f"获取分类内容发生错误: {str(e)}", "ERROR")

    result['list'] = videos
    result['page'] = pg
    result['pagecount'] = pagecount
    result['limit'] = limit
    result['total'] = total
    return result
def detailContent(self, ids):
    """获取详情内容 — build the vod detail dict for ids[0].

    The play page carries all the metadata, so it is scraped directly.
    Returns {'list': [vod]} on success, {} on failure.
    """
    result = {}
    if not ids or len(ids) == 0:
        return result

    vid = ids[0]
    play_url = f"{self.siteUrl}/index.php/vod/play/id/{vid}/sid/1/nid/1.html"

    try:
        response = self.fetch(play_url)
        if not response or response.status_code != 200:
            return result

        soup = BeautifulSoup(response.text, 'html.parser')

        # --- basic info ---
        title_tag = soup.find('h1', class_='items-title')
        title = title_tag.text.strip() if title_tag else ""

        pic = ""
        pic_tag = soup.find('img', class_='thumb')
        if pic_tag:
            pic = pic_tag.get('src', '')
            if pic and not pic.startswith('http'):
                pic = urllib.parse.urljoin(self.siteUrl, pic)

        desc_tag = soup.find('div', class_='text-content')
        desc = desc_tag.text.strip() if desc_tag else ""

        tags = []
        tags_container = soup.find('span', class_='items-tags')
        if tags_container:
            for tag in tags_container.find_all('a'):
                tag_text = tag.text.strip()
                if tag_text:
                    tags.append(tag_text)

        # --- episode list ---
        play_from = "偷乐短剧"
        play_list = []
        play_area = soup.find('div', class_='swiper-wrapper')
        if play_area:
            for ep in play_area.find_all('a'):
                ep_title = ep.text.strip()
                ep_url = ep.get('href', '')
                if not ep_url:
                    continue
                if not ep_url.startswith('http'):
                    ep_url = urllib.parse.urljoin(self.siteUrl, ep_url)
                # Bare numbers become "第N集".
                ep_num = ep_title
                if ep_num.isdigit():
                    ep_num = f"第{ep_num}集"
                play_list.append(f"{ep_num}${ep_url}")

        # Fallback 1: a single play button.
        if not play_list:
            play_btn = soup.find('a', class_='btn-play')
            if play_btn:
                btn_url = play_btn.get('href', '')
                if btn_url:
                    if not btn_url.startswith('http'):
                        btn_url = urllib.parse.urljoin(self.siteUrl, btn_url)
                    play_list.append(f"播放${btn_url}")

        # Fallback 2: the play page itself.
        if not play_list:
            page_url = f"{self.siteUrl}/index.php/vod/play/id/{vid}/sid/1/nid/1.html"
            play_list.append(f"播放${page_url}")

        # --- extended metadata (director / actors / year / ...) ---
        director = ""
        actor = ""
        year = ""
        area = ""
        remarks = ""
        for item in soup.find_all('div', class_='meta-item'):
            item_title = item.find('span', class_='item-title')
            item_content = item.find('span', class_='item-content')
            if not (item_title and item_content):
                continue
            title_text = item_title.text.strip()
            content_text = item_content.text.strip()
            if "导演" in title_text:
                director = content_text
            elif "主演" in title_text:
                actor = content_text
            elif "年份" in title_text:
                year = content_text
            elif "地区" in title_text:
                area = content_text
            elif "简介" in title_text:
                # Only as a fallback when the main description is empty.
                if not desc:
                    desc = content_text
            elif "状态" in title_text:
                remarks = content_text

        if not remarks:
            remarks_tag = soup.find('span', class_='remarks')
            if remarks_tag:
                remarks = remarks_tag.text.strip()

        vod = {
            "vod_id": vid,
            "vod_name": title,
            "vod_pic": pic,
            "vod_year": year,
            "vod_area": area,
            "vod_remarks": remarks,
            "vod_actor": actor,
            "vod_director": director,
            "vod_content": desc,
            "type_name": ",".join(tags),
            "vod_play_from": play_from,
            "vod_play_url": "#".join(play_list)
        }
        result = {'list': [vod]}
    except Exception as e:
        self.log(f"获取详情内容时出错: {str(e)}", "ERROR")

    return result
def searchContent(self, key, quick, pg=1):
    """搜索功能 — query /index.php/vod/search.html?wd=<key>.

    Returns {'list': [...]} of vod dicts; empty on any failure.
    """
    videos = []
    search_url = f"{self.siteUrl}/index.php/vod/search.html"
    params = {"wd": key}

    try:
        response = self.fetch(search_url, data=params)
        if response and response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')

            for item in soup.find_all('li', class_='item'):
                try:
                    title_tag = item.find('h3')
                    if not title_tag:
                        continue
                    title = title_tag.text.strip()

                    link_tag = item.find('a')
                    if not link_tag:
                        continue
                    link = link_tag.get('href', '')
                    if not link.startswith('http'):
                        link = urllib.parse.urljoin(self.siteUrl, link)

                    vid = self.extractVodId(link)
                    if not vid:
                        continue

                    img_tag = item.find('img')
                    img_url = ""
                    if img_tag:
                        # Lazy-loaded images keep the real URL in data-src.
                        img_url = img_tag.get('src', img_tag.get('data-src', ''))
                        if img_url and not img_url.startswith('http'):
                            img_url = urllib.parse.urljoin(self.siteUrl, img_url)

                    remarks_tag = item.find('span', class_='remarks')
                    remarks = remarks_tag.text.strip() if remarks_tag else ""
                    tags_tag = item.find('span', class_='tags')
                    tags = tags_tag.text.strip() if tags_tag else ""
                    # Merge status text and tag text into one remarks string.
                    if remarks and tags:
                        remarks = f"{remarks} | {tags}"
                    elif tags:
                        remarks = tags

                    videos.append({
                        'vod_id': vid,
                        'vod_name': title,
                        'vod_pic': img_url,
                        'vod_remarks': remarks
                    })
                except Exception as e:
                    self.log(f"处理搜索结果时出错: {str(e)}", "ERROR")
                    continue
    except Exception as e:
        self.log(f"搜索功能发生错误: {str(e)}", "ERROR")

    return {'list': videos}
def searchContentPage(self, key, quick, pg=1):
    # Paged search simply delegates to searchContent.
    return self.searchContent(key, quick, pg)
def playerContent(self, flag, id, vipFlags):
    """获取播放内容 — resolve `id` to a directly playable URL when possible.

    Resolution order: already-direct media URL -> player_aaaa JSON on
    the play page -> <video> tag -> iframe contents -> any raw media
    URL in the page. When everything fails, returns parse=1 with the
    play-page URL so an external parser can take over.
    """
    result = {}

    def direct(url, headers):
        # Standard "no further parsing needed" payload.
        return {
            "parse": 0,
            "url": url,
            "playUrl": "",
            "header": json.dumps(headers)
        }

    try:
        # Already a direct media URL?
        if self.isVideoFormat(id):
            return direct(id, self.headers)

        # Work out the play-page URL from the various id forms.
        if id.startswith(('http://', 'https://')):
            play_url = id
        elif id.startswith('/'):
            play_url = urllib.parse.urljoin(self.siteUrl, id)
        else:
            parts = id.split('_')
            if len(parts) > 1 and parts[0].isdigit():
                # "<vid>_<episode>" form.
                vid, nid = parts[0], parts[1]
                play_url = f"{self.siteUrl}/index.php/vod/play/id/{vid}/sid/1/nid/{nid}.html"
            else:
                # Treat the whole id as a vod id.
                play_url = f"{self.siteUrl}/index.php/vod/play/id/{id}/sid/1/nid/1.html"

        try:
            self.log(f"正在解析播放页面: {play_url}")
            response = self.fetch(play_url)
            if response and response.status_code == 200:
                html = response.text

                # 1) player_aaaa JSON blob embedded in the page.
                player_match = re.search(r'var\s+player_aaaa\s*=\s*({.*?});', html, re.DOTALL)
                if player_match:
                    try:
                        player_data = json.loads(player_match.group(1))
                        if 'url' in player_data:
                            video_url = player_data['url']
                            if not video_url.startswith('http'):
                                video_url = urllib.parse.urljoin(self.siteUrl, video_url)
                            self.log(f"从player_aaaa获取到视频地址: {video_url}")
                            return direct(video_url, self.headers)
                    except json.JSONDecodeError as e:
                        self.log(f"解析player_aaaa JSON出错: {str(e)}", "ERROR")

                # 2) A <video src=...> tag.
                video_match = re.search(r'<video[^>]*src=["\'](.*?)["\']', html)
                if video_match:
                    video_url = video_match.group(1)
                    if not video_url.startswith('http'):
                        video_url = urllib.parse.urljoin(self.siteUrl, video_url)
                    self.log(f"从video标签找到视频地址: {video_url}")
                    return direct(video_url, self.headers)

                # 3) Embedded iframe — fetch it and look for a media URL inside.
                iframe_match = re.search(r'<iframe[^>]*src=["\'](.*?)["\']', html)
                if iframe_match:
                    iframe_url = iframe_match.group(1)
                    if not iframe_url.startswith('http'):
                        iframe_url = urllib.parse.urljoin(self.siteUrl, iframe_url)
                    self.log(f"找到iframe,正在解析: {iframe_url}")
                    iframe_response = self.fetch(iframe_url)
                    if iframe_response and iframe_response.status_code == 200:
                        iframe_video_match = re.search(
                            r'(https?://[^\'"]+\.(mp4|m3u8|ts))',
                            iframe_response.text)
                        if iframe_video_match:
                            video_url = iframe_video_match.group(1)
                            self.log(f"从iframe中找到视频地址: {video_url}")
                            # Referer must point at the iframe origin.
                            return direct(video_url, {
                                "User-Agent": self.headers["User-Agent"],
                                "Referer": iframe_url
                            })

                # 4) Last resort: any raw media URL anywhere in the page.
                url_match = re.search(r'(https?://[^\'"]+\.(mp4|m3u8|ts))', html)
                if url_match:
                    video_url = url_match.group(1)
                    self.log(f"找到可能的视频地址: {video_url}")
                    return direct(video_url, self.headers)
        except Exception as e:
            self.log(f"解析播放地址时出错: {str(e)}", "ERROR")

        # Nothing playable found — hand the page URL to an external parser.
        self.log("未找到直接可用的视频地址,需要外部解析", "WARNING")
        result["parse"] = 1
        result["url"] = play_url
        result["playUrl"] = ""
        result["header"] = json.dumps(self.headers)
    except Exception as e:
        self.log(f"获取播放内容时出错: {str(e)}", "ERROR")

    return result
def localProxy(self, param):
    """本地代理 — this source serves nothing locally, so always 404."""
    return [404, "text/plain", {}, "Not Found"]
Binary file not shown.
Binary file not shown.
Binary file not shown.
1
摸鱼儿/js/56DM.js
Normal file
1
摸鱼儿/js/56DM.js
Normal file
@@ -0,0 +1 @@
|
||||
dmFyIHJ1bGUgPSB7CiAgICB0aXRsZTogJzU25Yqo5ryrJywKICAgIGhvc3Q6ICdodHRwczovL3d3dy41NmRtLmNjLycsCiAgICB1cmw6ICdodHRwczovL3d3dy41NmRtLmNjL3R5cGUvZnljbGFzcy1meXBhZ2UuaHRtbCcsCiAgICBzZWFyY2hVcmw6ICdodHRwczovL3d3dy41NmRtLmNjL3NlYXJjaC8qKi0tLS0tLS0tLS1meXBhZ2UtLS0uaHRtbCcsCiAgICBzZWFyY2hhYmxlOiAyLCAvL+aYr+WQpuWQr+eUqOWFqOWxgOaQnOe0oiwKICAgIHF1aWNrU2VhcmNoOiAwLCAvL+aYr+WQpuWQr+eUqOW/q+mAn+aQnOe0oiwKICAgIGZpbHRlcmFibGU6IDAsIC8v5piv5ZCm5ZCv55So5YiG57G7562b6YCJLAogICAgaGVhZGVyczogewogICAgICAgICdVc2VyLUFnZW50JzogJ1VDX1VBJywgLy8gIkNvb2tpZSI6ICIiCiAgICB9LCAvLyBjbGFzc19wYXJzZTonLnN0dWktaGVhZGVyX19tZW51IGxpOmd0KDApOmx0KDcpO2EmJlRleHQ7YSYmaHJlZjsvKFxcZCspLmh0bWwnLAogICAgY2xhc3NfcGFyc2U6ICcuc251aS1oZWFkZXItbWVudS1uYXYgbGk6Z3QoMCk6bHQoNik7YSYmVGV4dDthJiZocmVmOy4qLyguKj8pLmh0bWwnLAogICAgcGxheV9wYXJzZTogdHJ1ZSwKICAgIGxhenk6IGBqczoKICAgICAgICAgICAgaWYoL1xcLihtM3U4fG1wNCkvLnRlc3QoaW5wdXQpKXsKICAgICAgICAgICAgICAgIGlucHV0ID0ge3BhcnNlOjAsdXJsOmlucHV0fQogICAgICAgICAgICB9ZWxzZXsKICAgICAgICAgICAgICAgIGlmKHJ1bGUucGFyc2VfdXJsLnN0YXJ0c1dpdGgoJ2pzb246JykpewogICAgICAgICAgICAgICAgICAgIGxldCBwdXJsID0gcnVsZS5wYXJzZV91cmwucmVwbGFjZSgnanNvbjonLCcnKStpbnB1dDsKICAgICAgICAgICAgICAgICAgICBsZXQgaHRtbCA9IHJlcXVlc3QocHVybCk7CiAgICAgICAgICAgICAgICAgICAgaW5wdXQgPSB7cGFyc2U6MCx1cmw6SlNPTi5wYXJzZShodG1sKS51cmx9CiAgICAgICAgICAgICAgICB9ZWxzZXsKICAgICAgICAgICAgICAgICAgICBpbnB1dD0gcnVsZS5wYXJzZV91cmwraW5wdXQ7IAogICAgICAgICAgICAgICAgfQogICAgICAgICAgICB9CiAgICAgICAgICAgIGAsCiAgICBsaW1pdDogNiwKICAgIOaOqOiNkDogJy5jQ0JmX0ZBQUVmYmM7bGk7YSYmdGl0bGU7Lmxhenlsb2FkJiZkYXRhLW9yaWdpbmFsOy5kQURfQkJDSSYmVGV4dDthJiZocmVmJywKICAgIGRvdWJsZTogdHJ1ZSwgLy8g5o6o6I2Q5YaF5a655piv5ZCm5Y+M5bGC5a6a5L2NCiAgICDkuIDnuqc6ICcuY0NCZl9GQUFFZmJjIGxpO2EmJnRpdGxlO2EmJmRhdGEtb3JpZ2luYWw7LmRBRF9CQkNJJiZUZXh0O2EmJmhyZWYnLAogICAg5LqM57qnOiB7CiAgICAgICAgInRpdGxlIjogImgxJiZUZXh0IiwKICAgICAgICAiaW1nIjogIi5zdHVpLWNvbnRlbnRfX3RodW1iIC5sYXp5bG9hZCYmZGF0YS1vcmlnaW5hbCIsCiAgICAgICAgImRlc2MiOiAiLmNDQmZfREFCQ2NhY19faGNJZGVFIHA6ZXEoMCkmJlRleHQ7LmNDQmZfREFCQ2NhY19faGNJZGVF
IHA6ZXEoMSkmJlRleHQ7LmNDQmZfREFCQ2NhY19faGNJZGVFIHA6ZXEoMikmJlRleHQ7LmNDQmZfREFCQ2NhY19faGNJZGVFIHA6ZXEoMykmJlRleHQ7LmNDQmZfREFCQ2NhY19faGNJZGVFIHA6ZXEoNCkmJlRleHQiLAogICAgICAgICJjb250ZW50IjogIi5kZXRhaWwmJlRleHQiLAogICAgICAgICJ0YWJzIjogIi5jaGFubmVsLXRhYiBsaSIsCiAgICAgICAgImxpc3RzIjogIi5wbGF5LWxpc3QtY29udGVudDplcSgjaWQpIGxpIgogICAgfSwKICAgIOaQnOe0ojogJy5jQ0JmX0ZBQUVmYmNfX2RiRDthJiZ0aXRsZTsubGF6eWxvYWQmJmRhdGEtb3JpZ2luYWw7LmRBRF9CQkNJJiZUZXh0O2EmJmhyZWY7LmNDQmZfRkFBRWZiY19faGNJZGVFJiZwOmVxKDApIHAmJlRleHQnLAp9
|
||||
1
摸鱼儿/js/NTDM.js
Normal file
1
摸鱼儿/js/NTDM.js
Normal file
File diff suppressed because one or more lines are too long
30
摸鱼儿/json/kf.json
Normal file
30
摸鱼儿/json/kf.json
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"SiteUrl": "https://www.kuafuzy.com,https://www.kfzy.cc",
|
||||
"Classes": [
|
||||
{
|
||||
"type_name": "电影",
|
||||
"type_id": "1"
|
||||
},
|
||||
{
|
||||
"type_name": "剧集",
|
||||
"type_id": "2"
|
||||
},
|
||||
{
|
||||
"type_name": "4K电影",
|
||||
"type_id": "3"
|
||||
},
|
||||
{
|
||||
"type_name": "4K剧集",
|
||||
"type_id": "4"
|
||||
},
|
||||
{
|
||||
"type_name": "动漫",
|
||||
"type_id": "5"
|
||||
},
|
||||
{
|
||||
"type_name": "短剧",
|
||||
"type_id": "6"
|
||||
}
|
||||
],
|
||||
"Cookie": "bbs_token=zNQpYs_2BmC2e_2FcUM_2BmuihZ33Jswh_2Fj7sPtelqcw_3D_3D; bbs_sid=lgs96gh42gevj7lsg5f8o3kjsi"
|
||||
}
|
||||
Reference in New Issue
Block a user