Update PY/51吸瓜.py, PY/APPV2.py, PY/hitvAPP.py, PY/lavAPP.py, PY/LIVES.py, PY/LREEOK.py, PY/Phb.py, PY/Xhm.py, PY/Xvd.py, PY/爱.py, PY/爱瓜TVAPP.py, PY/光速APP.py, PY/红果网页.py, PY/金牌.py, PY/火车太顺APP.py, PY/芒.py, PY/剧多短剧APP.py, PY/绝对影视.py, PY/美帕APP.py, PY/胖虎APP.py, PY/三号动漫APP.py, PY/视觉APP.py, PY/腾.py, PY/甜圈短剧.py, PY/推特APP.py, PY/香蕉APP.py, PY/小苹果APP.py, PY/小红薯APP.py, PY/悠悠APP.py, PY/优.py, PY/浴火社APP.py files
This commit is contained in:
parent
f9e7ce40ac
commit
cd4866c522
219
PY/51吸瓜.py
Normal file
219
PY/51吸瓜.py
Normal file
@ -0,0 +1,219 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import json
|
||||
import random
|
||||
import re
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
from base64 import b64decode
|
||||
import requests
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Util.Padding import unpad
|
||||
from pyquery import PyQuery as pq
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
    """51吸瓜 spider.

    Resolves a working mirror domain at init time (the publisher page hides
    candidate domains inside a base64-encoded script) and scrapes listing,
    detail and search pages; cover images are AES-encrypted and decrypted
    through the local proxy.
    """

    # Browser-like headers; the site rejects obvious non-browser clients.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'dnt': '1',
        'upgrade-insecure-requests': '1',
        'sec-fetch-site': 'cross-site',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-user': '?1',
        'sec-fetch-dest': 'document',
        'accept-language': 'zh-CN,zh;q=0.9',
        'priority': 'u=0, i'
    }

    def init(self, extend=""):
        # Generate a few candidate mirrors and keep the fastest responder.
        self.host = self.host_late(self.get_domains())

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Home page: top-level categories plus the front-page article grid."""
        doc = self.getpq(self.fetch(self.host, headers=self.headers).text)
        classes = [{'type_name': k('a').text(), 'type_id': k('a').attr('href')}
                   for k in doc('.category-list ul li').items()]
        return {'class': classes, 'list': self.getlist(doc('#index article a'))}

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """Category listing; the page number is appended directly to the path."""
        doc = self.getpq(self.fetch(f"{self.host}{tid}{pg}", headers=self.headers).text)
        return {
            'list': self.getlist(doc('#archive article a')),
            'page': pg,
            # The site exposes no totals, so fake open-ended paging.
            'pagecount': 9999,
            'limit': 90,
            'total': 999999,
        }

    def detailContent(self, ids):
        """Detail page: tags become clickable links, each player an episode."""
        url = f"{self.host}{ids[0]}"
        doc = self.getpq(self.fetch(url, headers=self.headers).text)
        vod = {'vod_play_from': '51吸瓜'}
        # Bare except: clauses narrowed to Exception so KeyboardInterrupt /
        # SystemExit are no longer swallowed.
        try:
            clist = []
            if doc('.tags .keywords a'):
                for k in doc('.tags .keywords a').items():
                    title = k.text()
                    href = k.attr('href')
                    clist.append('[a=cr:' + json.dumps({'id': href, 'name': title}) + '/]' + title + '[/a]')
            vod['vod_content'] = ' '.join(clist)
        except Exception:
            vod['vod_content'] = doc('.post-title').text()
        try:
            plist = []
            if doc('.dplayer'):
                for c, k in enumerate(doc('.dplayer').items(), start=1):
                    config = json.loads(k.attr('data-config'))
                    plist.append(f"视频{c}${config['video']['url']}")
            vod['vod_play_url'] = '#'.join(plist)
        except Exception:
            vod['vod_play_url'] = f"请停止活塞运动,可能没有视频${url}"
        return {'list': [vod]}

    def searchContent(self, key, quick, pg="1"):
        doc = self.getpq(self.fetch(f"{self.host}/search/{key}/{pg}", headers=self.headers).text)
        return {'list': self.getlist(doc('#archive article a')), 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        """Hand the page URL to the app player; parse=1 lets it sniff the stream."""
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
            'sec-ch-ua-platform': '"macOS"',
            'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
            'DNT': '1',
            'sec-ch-ua-mobile': '?0',
            'Origin': self.host,
            'Sec-Fetch-Site': 'cross-site',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Dest': 'empty',
            'Accept-Language': 'zh-CN,zh;q=0.9',
        }
        return {'parse': 1, 'url': id, 'header': headers}

    def localProxy(self, param):
        """Fetch an encrypted cover image and return it decrypted to the app."""
        res = self.fetch(param['url'], headers=self.headers, timeout=10)
        return [200, res.headers.get('Content-Type'), self.aesimg(res.content)]

    def get_domains(self):
        """Extract mirror-domain candidates from the publisher page.

        The page hides a JS blob via Base64.decode(...); the blob carries a
        comma-separated word list and the shared domain suffix.
        Raises Exception when any extraction step fails.
        """
        html = self.getpq(self.fetch("https://51cg.fun", headers=self.headers).text)
        html_pattern = r"Base64\.decode\('([^']+)'\)"
        html_match = re.search(html_pattern, html('script').eq(-1).text(), re.DOTALL)
        if not html_match:
            raise Exception("未找到html")
        html = b64decode(html_match.group(1)).decode()
        words_match = re.search(r"words\s*=\s*'([^']+)'", html, re.DOTALL)
        if not words_match:
            raise Exception("未找到words")
        words = words_match.group(1).split(',')
        main_pattern = r"lineAry\s*=.*?words\.random\(\)\s*\+\s*'\.([^']+)'"
        domain_match = re.search(main_pattern, html, re.DOTALL)
        if not domain_match:
            raise Exception("未找到主域名")
        domain_suffix = domain_match.group(1)
        # Any word + suffix is a valid mirror; sample a few to race for latency.
        return [f"https://{random.choice(words)}.{domain_suffix}" for _ in range(3)]

    def host_late(self, url_list):
        """Race candidate hosts with concurrent HEAD probes; return the fastest.

        Accepts either a list of URLs or a comma-separated string.
        Unreachable hosts score infinity so any responder wins.
        """
        if isinstance(url_list, str):
            urls = [u.strip() for u in url_list.split(',')]
        else:
            urls = url_list
        if len(urls) <= 1:
            return urls[0] if urls else ''

        results = {}

        def test_host(url):
            try:
                start_time = time.time()
                # Response body is irrelevant; only latency matters.
                requests.head(url, timeout=1.0, allow_redirects=False)
                results[url] = (time.time() - start_time) * 1000
            except Exception:
                results[url] = float('inf')

        threads = [threading.Thread(target=test_host, args=(url,)) for url in urls]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        return min(results.items(), key=lambda x: x[1])[0]

    def getlist(self, data):
        """Map article anchors to vod dicts; entries missing a field are skipped."""
        videos = []
        for k in data.items():
            href = k.attr('href')
            name = k('h2').text()
            date = k('span[itemprop="datePublished"]').text()
            if href and name and date:
                videos.append({
                    'vod_id': href,
                    'vod_name': name.replace('\n', ' '),
                    'vod_pic': self.getimg(k('script').text()),
                    'vod_remarks': date,
                    'style': {"type": "rect", "ratio": 1.33}
                })
        return videos

    def getimg(self, text):
        """Pull the cover URL out of an inline loadBannerDirect() call and
        route it through the local proxy (the image needs AES decryption)."""
        match = re.search(r"loadBannerDirect\('([^']+)'", text)
        if match:
            url = match.group(1)
            return f"{self.getProxyUrl()}&url={url}&type=img"
        return ''

    def aesimg(self, word):
        """AES-CBC decrypt an image payload (fixed key/iv shipped by the site)."""
        key = b'f5d965df75336270'
        iv = b'97b60394abc2fbe1'
        cipher = AES.new(key, AES.MODE_CBC, iv)
        return unpad(cipher.decrypt(word), AES.block_size)

    def getpq(self, data):
        """Parse HTML leniently; retry with UTF-8 bytes when pyquery balks."""
        try:
            return pq(data)
        except Exception as e:
            print(f"{str(e)}")
            return pq(data.encode('utf-8'))
|
97
PY/APPV2.py
Normal file
97
PY/APPV2.py
Normal file
@ -0,0 +1,97 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import sys
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
class Spider(Spider):
    """Generic "appV2" maccms-style JSON API spider; the API root arrives via ext."""

    headers = {
        'User-Agent': 'okhttp/4.12.0',
    }

    def init(self, extend=""):
        '''
        example:
        {
            "key": "py_appV2",
            "name": "xxx",
            "type": 3,
            "searchable": 1,
            "quickSearch": 1,
            "filterable": 1,
            "api": "./py/APPV2.py",
            "ext": "http://cmsyt.lyyytv.cn"
        }
        '''
        # ext carries the API host, e.g. http://cmsyt.lyyytv.cn
        self.host = extend

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Build the class list and per-class filter options from the nav feed.

        Filter options come from each class's `type_extend` mapping; only the
        recognized keys with non-blank values are exposed.
        """
        # fix: was f"{self.host}//api.php/..." — double slash, inconsistent
        # with every other endpoint built in this class
        data = self.fetch(f"{self.host}/api.php/app/nav?token=", headers=self.headers).json()
        keys = ["class", "area", "lang", "year", "letter", "by", "sort"]
        filters = {}
        classes = []
        for item in data['list']:
            jsontype_extend = item["type_extend"]
            classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
            # Only publish a filter group when at least one key has content.
            if any(k in jsontype_extend and jsontype_extend[k].strip() != "" for k in keys):
                filters[str(item["type_id"])] = []
                # Preserve the server's own key order for the option groups.
                for dkey in jsontype_extend:
                    if dkey in keys and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()}
                                       for value in values if value.strip() != ""]
                        filters[str(item["type_id"])].append(
                            {"key": dkey, "name": dkey, "value": value_array})
        return {"class": classes, "filters": filters}

    def homeVideoContent(self):
        """Front-page recommendations: flatten every section's vlist."""
        data = self.fetch(f"{self.host}/api.php/app/index_video?token=", headers=self.headers).json()
        videos = []
        for item in data['list']:
            videos.extend(item['vlist'])
        return {'list': videos}

    def categoryContent(self, tid, pg, filter, extend):
        """Paged category listing; the API response is already app-shaped."""
        params = {
            'tid': tid,
            'class': extend.get('class', ''),
            'area': extend.get('area', ''),
            'lang': extend.get('lang', ''),
            'year': extend.get('year', ''),
            'limit': '18',
            'pg': pg,
        }
        return self.fetch(f"{self.host}/api.php/app/video", params=params, headers=self.headers).json()

    def detailContent(self, ids):
        data = self.fetch(f"{self.host}/api.php/app/video_detail?id={ids[0]}", headers=self.headers).json()
        return {'list': [data['data']]}

    def searchContent(self, key, quick, pg="1"):
        data = self.fetch(f"{self.host}/api.php/app/search?text={key}&pg={pg}", headers=self.headers).json()
        videos = data['list']
        # Drop the server's 'type' field, which clashes with the app's own.
        for item in videos:
            item.pop('type', None)
        return {'list': videos, 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        return {'jx': 1, 'playUrl': '', 'parse': 1, 'url': id, 'header': self.headers}

    def localProxy(self, param):
        pass
768
PY/LIVES.py
Normal file
768
PY/LIVES.py
Normal file
@ -0,0 +1,768 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
from base64 import b64decode, b64encode
|
||||
from urllib.parse import parse_qs
|
||||
import requests
|
||||
from pyquery import PyQuery as pq
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
    """Prime douyin request headers with a ttwid cookie.

    Fix: the HEAD probe was unguarded and had no timeout, so an unreachable
    douyin host would hang or crash init for every platform. The probe is now
    best-effort and falls back to an empty cookie.
    """
    tid = 'douyin'
    headers = self.gethr(0, tid)
    ttwid = None
    try:
        response = requests.head(self.hosts[tid], headers=headers, timeout=5)
        ttwid = response.cookies.get('ttwid')
    except Exception as e:
        print(f"douyin ttwid fetch failed: {e}")
    headers.update({
        'authority': self.hosts[tid].split('//')[-1],
        'cookie': f'ttwid={ttwid}' if ttwid else ''
    })
    self.dyheaders = headers

def getName(self):
    pass

def isVideoFormat(self, url):
    pass

def manualVideoCheck(self):
    pass

def destroy(self):
    pass
# UA pool: [0] desktop browser, [1] Dart client (some endpoints require it).
headers = [
    {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0"
    },
    {
        "User-Agent": "Dart/3.4 (dart:io)"
    }
]

# Fallback URL — presumably used by code outside this view; verify before removing.
excepturl = 'https://www.baidu.com'

# API roots per platform; list values hold [primary, secondary] hosts.
hosts = {
    "huya": ["https://www.huya.com", "https://mp.huya.com"],
    "douyin": "https://live.douyin.com",
    "douyu": "https://www.douyu.com",
    "wangyi": "https://cc.163.com",
    "bili": ["https://api.live.bilibili.com", "https://api.bilibili.com"]
}

# Referer values some platform APIs insist on.
referers = {
    "huya": "https://live.cdn.huya.com",
    "douyin": "https://live.douyin.com",
    "douyu": "https://m.douyu.com",
    "bili": "https://live.bilibili.com"
}

# Headers handed to the player alongside each platform's stream URLs.
playheaders = {
    "wangyi": {
        "User-Agent": "ExoPlayer",
        "Connection": "Keep-Alive",
        "Icy-MetaData": "1"
    },
    "bili": {
        'Accept': '*/*',
        'Icy-MetaData': '1',
        'referer': referers['bili'],
        'user-agent': headers[0]['User-Agent']
    },
    'douyin': {
        'User-Agent': 'libmpv',
        'Icy-MetaData': '1'
    },
    'huya': {
        'User-Agent': 'ExoPlayer',
        'Connection': 'Keep-Alive',
        'Icy-MetaData': '1'
    },
    'douyu': {
        'User-Agent': 'libmpv',
        'Icy-MetaData': '1'
    }
}
def process_bili(self):
    """Fetch bilibili's area list; returns ('bili', filter-spec) or ('bili', None)."""
    try:
        self.blfdata = self.fetch(
            f'{self.hosts["bili"][0]}/room/v1/Area/getList?need_entrance=1&parent_id=0',
            headers=self.gethr(0, 'bili')
        ).json()
        options = [{'n': area['name'], 'v': str(area['id'])}
                   for area in self.blfdata['data']]
        return 'bili', [{'key': 'cate', 'name': '分类', 'value': options}]
    except Exception as e:
        print(f"bili处理错误: {e}")
        return 'bili', None

def process_douyin(self):
    """Scrape douyin's category JSON out of an inline <script> blob."""
    try:
        scripts = self.getpq(self.hosts['douyin'], headers=self.dyheaders)('script')
        for node in scripts.items():
            text = node.text()
            if 'categoryData' not in text:
                continue
            start = text.find('{')
            end = text.rfind('}') + 1
            if start != -1 and end != -1:
                # The embedded JSON carries escaped quotes; unescape first.
                json_str = text[start:end].replace('\\"', '"')
                try:
                    self.dyifdata = json.loads(json_str)
                    options = [{'n': cat['partition']['title'],
                                'v': f"{cat['partition']['id_str']}@@{cat['partition']['title']}"}
                               for cat in self.dyifdata['categoryData']]
                    return 'douyin', [{'key': 'cate', 'name': '分类', 'value': options}]
                except json.JSONDecodeError as e:
                    print(f"douyin解析错误: {e}")
                    return 'douyin', None
    except Exception as e:
        print(f"douyin请求或处理错误: {e}")
        return 'douyin', None

def process_douyu(self):
    """Fetch douyu's level-1 category list."""
    try:
        self.dyufdata = self.fetch(
            f'{self.referers["douyu"]}/api/cate/list',
            headers=self.headers[1]
        ).json()
        options = [{'n': cate['cate1Name'], 'v': str(cate['cate1Id'])}
                   for cate in self.dyufdata['data']['cate1Info']]
        return 'douyu', [{'key': 'cate', 'name': '分类', 'value': options}]
    except Exception as e:
        print(f"douyu错误: {e}")
        return 'douyu', None
def homeContent(self, filter):
    """Top-level tabs (one per platform) plus per-platform filter specs.

    huya's filter is static; the other platforms are probed concurrently.
    """
    cateManual = {
        "虎牙": "huya",
        "哔哩": "bili",
        "抖音": "douyin",
        "斗鱼": "douyu",
        "网易": "wangyi"
    }
    filters = {
        'huya': [{'key': 'cate', 'name': '分类',
                  'value': [{'n': '网游', 'v': '1'}, {'n': '单机', 'v': '2'},
                            {'n': '娱乐', 'v': '8'}, {'n': '手游', 'v': '3'}]}]
    }

    with ThreadPoolExecutor(max_workers=3) as executor:
        tasks = [
            executor.submit(self.process_bili),
            executor.submit(self.process_douyin),
            executor.submit(self.process_douyu),
        ]
        for task in tasks:
            platform, spec = task.result()
            if spec:
                filters[platform] = spec

    classes = [{'type_name': name, 'type_id': tid}
               for name, tid in cateManual.items()]
    return {'class': classes, 'filters': filters}
def homeVideoContent(self):
    pass

def categoryContent(self, tid, pg, filter, extend):
    """Dispatch a category page to the owning platform's lister.

    tid is either a bare platform key or a drill-down id of the form
    'click_<platform>@@...', hence the substring matching below.
    """
    vdata = []
    pagecount = 9999
    if tid == 'wangyi':
        vdata, pagecount = self.wyccContent(tid, pg, filter, extend, vdata)
    elif 'bili' in tid:
        vdata, pagecount = self.biliContent(tid, pg, filter, extend, vdata)
    elif 'huya' in tid:
        vdata, pagecount = self.huyaContent(tid, pg, filter, extend, vdata)
    elif 'douyin' in tid:
        vdata, pagecount = self.douyinContent(tid, pg, filter, extend, vdata)
    elif 'douyu' in tid:
        vdata, pagecount = self.douyuContent(tid, pg, filter, extend, vdata)
    return {
        'page': pg,
        'limit': 90,
        'total': 999999,
        'list': vdata,
        'pagecount': pagecount,
    }
def wyccContent(self, tid, pg, filter, extend, vdata):
    """List netease cc live rooms via its plain paged JSON API."""
    params = {
        'format': 'json',
        'start': (int(pg) - 1) * 20,
        'size': '20',
    }
    response = self.fetch(f'{self.hosts[tid]}/api/category/live/', params=params, headers=self.headers[0]).json()
    for room in response['lives']:
        # Rooms without a cuteid cannot be opened; skip them.
        if not room.get('cuteid'):
            continue
        vdata.append(self.buildvod(
            vod_id=f"{tid}@@{room['cuteid']}",
            vod_name=room.get('title'),
            vod_pic=room.get('cover'),
            vod_remarks=room.get('nickname'),
            style={"type": "rect", "ratio": 1.33}
        ))
    return vdata, 9999
def biliContent(self, tid, pg, filter, extend, vdata):
    """bilibili lister.

    Page one of a filtered category yields the area's sub-areas as folder
    entries (vod_tag=1); any other request lists live rooms, either for a
    drilled-down area ('click_...' tid) or the global online-sorted feed.

    Fix: the original rebound its response variable (`data`) to each built
    vod inside the loop that was iterating `data['data']['list']` — it only
    worked because the iterator was already bound. Distinct names now.
    """
    if extend.get('cate') and pg == '1' and 'click' not in tid:
        for area in self.blfdata['data']:
            if str(area['id']) == extend['cate']:
                for sub in area['list']:
                    vdata.append(self.buildvod(
                        vod_id=f"click_{tid}@@{area['id']}@@{sub['id']}",
                        vod_name=sub.get('name'),
                        vod_pic=sub.get('pic'),
                        vod_tag=1,  # folder entry: drills back into categoryContent
                        style={"type": "oval", "ratio": 1}
                    ))
        return vdata, 1
    else:
        path = f'/xlive/web-interface/v1/second/getListByArea?platform=web&sort=online&page_size=30&page={pg}'
        if 'click' in tid:
            ids = tid.split('_')[1].split('@@')
            tid = ids[0]
            path = f'/xlive/web-interface/v1/second/getList?platform=web&parent_area_id={ids[1]}&area_id={ids[-1]}&sort_type=&page={pg}'
        resp = self.fetch(f'{self.hosts[tid][0]}{path}', headers=self.gethr(0, tid)).json()
        for room in resp['data']['list']:
            if room.get('roomid'):
                vdata.append(self.buildvod(
                    f"{tid}@@{room['roomid']}",
                    room.get('title'),
                    room.get('cover'),
                    room.get('watched_show', {}).get('text_large'),
                    0,
                    room.get('uname'),
                    style={"type": "rect", "ratio": 1.33}
                ))
        return vdata, 9999
def huyaContent(self, tid, pg, filter, extend, vdata):
    """huya lister: filtered page one lists games as folders, else paged rooms."""
    if extend.get('cate') and pg == '1' and 'click' not in tid:
        buss_type = extend.get('cate')
        data = self.fetch(f'{self.referers[tid]}/liveconfig/game/bussLive?bussType={buss_type}',
                          headers=self.headers[1]).json()
        for game in data['data']:
            vdata.append(self.buildvod(
                vod_id=f"click_{tid}@@{int(game['gid'])}",
                vod_name=game.get('gameFullName'),
                vod_pic=f'https://huyaimg.msstatic.com/cdnimage/game/{int(game["gid"])}-MS.jpg',
                vod_tag=1,  # folder entry
                style={"type": "oval", "ratio": 1}
            ))
        return vdata, 1
    else:
        gid = ''
        if 'click' in tid:
            ids = tid.split('_')[1].split('@@')
            tid = ids[0]
            gid = f'&gameId={ids[1]}'
        data = self.fetch(f'{self.hosts[tid][0]}/cache.php?m=LiveList&do=getLiveListByPage&tagAll=0{gid}&page={pg}',
                          headers=self.headers[1]).json()
        for room in data['data']['datas']:
            if room.get('profileRoom'):
                vdata.append(self.buildvod(
                    f"{tid}@@{room['profileRoom']}",
                    room.get('introduction'),
                    room.get('screenshot'),
                    # Viewer count rendered in units of 10k.
                    str(int(room.get('totalCount', '1')) / 10000) + '万',
                    0,
                    room.get('nick'),
                    style={"type": "rect", "ratio": 1.33}
                ))
        return vdata, 9999
def douyinContent(self, tid, pg, filter, extend, vdata):
    """douyin lister: filtered page one lists sub-partitions as folders, else rooms."""
    if extend.get('cate') and pg == '1' and 'click' not in tid:
        ids = extend.get('cate').split('@@')
        for cat in self.dyifdata['categoryData']:
            part = cat['partition']
            if part['id_str'] == ids[0] and part['title'] == ids[1]:
                # The partition itself comes first, then its sub-partitions.
                subs = cat['sub_partition'].copy()
                subs.insert(0, {'partition': part})
                for entry in subs:
                    entry = entry['partition']
                    vdata.append(self.buildvod(
                        vod_id=f"click_{tid}@@{entry['id_str']}@@{entry['type']}",
                        vod_name=entry.get('title'),
                        vod_pic='https://p3-pc-weboff.byteimg.com/tos-cn-i-9r5gewecjs/pwa_v3/512x512-1.png',
                        vod_tag=1,  # folder entry
                        style={"type": "oval", "ratio": 1}
                    ))
        return vdata, 1
    else:
        path = f'/webcast/web/partition/detail/room/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&count=15&offset={(int(pg) - 1) * 15}&partition=720&partition_type=1'
        if 'click' in tid:
            ids = tid.split('_')[1].split('@@')
            tid = ids[0]
            path = f'/webcast/web/partition/detail/room/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&count=15&offset={(int(pg) - 1) * 15}&partition={ids[1]}&partition_type={ids[-1]}&req_from=2'
        data = self.fetch(f'{self.hosts[tid]}{path}', headers=self.dyheaders).json()
        for room in data['data']['data']:
            vdata.append(self.buildvod(
                vod_id=f"{tid}@@{room['web_rid']}",
                vod_name=room['room'].get('title'),
                vod_pic=room['room']['cover'].get('url_list')[0],
                vod_year=room.get('user_count_str'),
                vod_remarks=room['room']['owner'].get('nickname'),
                style={"type": "rect", "ratio": 1.33}
            ))
        return vdata, 9999
def douyuContent(self, tid, pg, filter, extend, vdata):
    """douyu lister: filtered page one lists level-2 categories as folders, else rooms."""
    if extend.get('cate') and pg == '1' and 'click' not in tid:
        for cate in self.dyufdata['data']['cate2Info']:
            if str(cate['cate1Id']) == extend['cate']:
                vdata.append(self.buildvod(
                    vod_id=f"click_{tid}@@{cate['cate2Id']}",
                    vod_name=cate.get('cate2Name'),
                    vod_pic=cate.get('icon'),
                    vod_remarks=cate.get('count'),
                    vod_tag=1,  # folder entry
                    style={"type": "oval", "ratio": 1}
                ))
        return vdata, 1
    else:
        path = f'/japi/weblist/apinc/allpage/6/{pg}'
        if 'click' in tid:
            ids = tid.split('_')[1].split('@@')
            tid = ids[0]
            path = f'/gapi/rkc/directory/mixList/2_{ids[1]}/{pg}'
        data = self.fetch(f'{self.hosts[tid]}{path}', headers=self.headers[1]).json()
        for room in data['data']['rl']:
            vdata.append(self.buildvod(
                vod_id=f"{tid}@@{room['rid']}",
                vod_name=room.get('rn'),
                vod_pic=room.get('rs16'),
                # Online count rendered in units of 10k.
                vod_year=str(int(room.get('ol', 1)) / 10000) + '万',
                vod_remarks=room.get('nn'),
                style={"type": "rect", "ratio": 1.33}
            ))
        return vdata, 9999
def detailContent(self, ids):
    """Route a detail request to the platform-specific builder.

    ids[0] has the form '<platform>@@<room-id>'.

    Fix: the original left `vod` unbound (NameError) when the platform prefix
    matched none of the branches; unknown prefixes now yield an empty list.
    """
    parts = ids[0].split('@@')
    handlers = {
        'wangyi': self.wyccDetail,
        'bili': self.biliDetail,
        'huya': self.huyaDetail,
        'douyin': self.douyinDetail,
        'douyu': self.douyuDetail,
    }
    handler = handlers.get(parts[0])
    if handler is None:
        return {'list': []}
    return {'list': [handler(parts)]}
def wyccDetail(self, ids):
    """Build play lines for a netease cc room from its embedded page JSON.

    ids: ['wangyi', cuteid]. One play line per CDN, each carrying a
    base64-encoded [quality-name, url, ...] list.

    Fixes: `type(url) == str` replaced with isinstance; the inner loop
    variable no longer shadows the outer `data` dict.
    """
    try:
        vdata = self.getpq(f'{self.hosts[ids[0]]}/{ids[1]}', self.headers[0])('script').eq(-1).text()

        def get_quality_name(vbr):
            # Map a bitrate to the site's human-readable quality label.
            if vbr <= 600:
                return "标清"
            elif vbr <= 1000:
                return "高清"
            elif vbr <= 2000:
                return "超清"
            else:
                return "蓝光"

        data = json.loads(vdata)['props']['pageProps']['roomInfoInitData']
        name = data['live'].get('title', ids[0])
        vod = self.buildvod(vod_name=data.get('keywords_suffix'), vod_remarks=data['live'].get('title'),
                            vod_content=data.get('description_suffix'))
        resolution_data = data['live']['quickplay']['resolution']
        all_streams = {}
        # Highest bitrate first so the best quality leads each CDN list.
        sorted_qualities = sorted(resolution_data.items(),
                                  key=lambda x: x[1]['vbr'],
                                  reverse=True)
        for quality, qinfo in sorted_qualities:
            quality_name = get_quality_name(qinfo['vbr'])
            for cdn_name, url in qinfo['cdn'].items():
                if isinstance(url, str) and url.startswith('http'):
                    all_streams.setdefault(cdn_name, []).extend([quality_name, url])
        plists = []
        names = []
        for i, (cdn_name, stream_list) in enumerate(all_streams.items(), 1):
            names.append(f'线路{i}')
            plists.append(f"{name}${ids[0]}@@{self.e64(json.dumps(stream_list))}")
        vod['vod_play_from'] = "$$$".join(names)
        vod['vod_play_url'] = "$$$".join(plists)
        return vod
    except Exception as e:
        return self.handle_exception(e)
def biliDetail(self, ids):
    """Build a single play line for a bilibili room listing every accept_qn level.

    ids: ['bili', room_id].
    """
    try:
        info = self.fetch(
            f'{self.hosts[ids[0]][0]}/xlive/web-room/v1/index/getInfoByRoom?room_id={ids[1]}&wts={int(time.time())}',
            headers=self.gethr(0, ids[0])).json()
        room = info['data']['room_info']
        vod = self.buildvod(
            vod_name=room.get('title'),
            type_name=room.get('parent_area_name') + '/' + room.get('area_name'),
            vod_remarks=room.get('tags'),
            vod_play_from=room.get('title'),
        )
        play = self.fetch(
            f'{self.hosts[ids[0]][0]}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={ids[1]}&protocol=0%2C1&format=0%2C1%2C2&codec=0%2C1&platform=web',
            headers=self.gethr(0, ids[0])).json()
        qn_desc = play['data']['playurl_info']['playurl']['g_qn_desc']
        # Collect every codec's accept_qn list and keep the longest one.
        accept_lists = []
        for stream in play['data']['playurl_info']['playurl']['stream']:
            for fmt in stream['format']:
                for codec in fmt['codec']:
                    if 'accept_qn' in codec:
                        accept_lists.append(codec['accept_qn'])
        best_qns = max(accept_lists, key=len) if accept_lists else []
        qn_names = {entry['qn']: entry['desc'] for entry in qn_desc}
        vod['vod_play_url'] = "#".join(
            f"{qn_names.get(qn)}${ids[0]}@@{ids[1]}@@{qn}" for qn in best_qns)
        return vod
    except Exception as e:
        return self.handle_exception(e)
def huyaDetail(self, ids):
    """Build play lines for a huya room: one line per stream group, one entry
    per CDN, each carrying a base64-encoded [quality, url, ...] list.

    ids: ['huya', room_id].
    """
    try:
        vdata = self.fetch(f'{self.hosts[ids[0]][1]}/cache.php?m=Live&do=profileRoom&roomid={ids[1]}',
                           headers=self.headers[0]).json()
        live = vdata['data']['liveData']
        vod = self.buildvod(
            vod_name=live.get('introduction'),
            type_name=live.get('gameFullName'),
            vod_director=live.get('nick'),
            vod_remarks=live.get('contentIntro'),
        )
        # Walk stream groups in reverse declaration order.
        stream_groups = dict(reversed(list(vdata['data']['stream'].items())))
        names = []
        plist = []
        for stream_type, stream_data in stream_groups.items():
            if not (isinstance(stream_data, dict) and 'multiLine' in stream_data and 'rateArray' in stream_data):
                continue
            names.append(f"线路{len(names) + 1}")
            qualities = sorted(
                stream_data['rateArray'],
                key=lambda x: (x['iBitRate'], x['sDisplayName']),
                reverse=True
            )
            cdn_urls = []
            for cdn in stream_data['multiLine']:
                quality_urls = []
                for quality in qualities:
                    label = quality['sDisplayName']
                    bit_rate = quality['iBitRate']
                    base_url = cdn['url']
                    if bit_rate > 0:
                        # Rewrite the template URL for this bitrate.
                        if '.m3u8' in base_url:
                            stream_url = base_url.replace(
                                'ratio=2000',
                                f'ratio={bit_rate}'
                            )
                        else:
                            stream_url = base_url.replace(
                                'imgplus.flv',
                                f'imgplus_{bit_rate}.flv'
                            )
                    else:
                        stream_url = base_url
                    quality_urls.extend([label, stream_url])
                cdn_urls.append(f"{cdn['cdnType']}${ids[0]}@@{self.e64(json.dumps(quality_urls))}")
            if cdn_urls:
                plist.append('#'.join(cdn_urls))
        vod['vod_play_from'] = "$$$".join(names)
        vod['vod_play_url'] = "$$$".join(plist)
        return vod
    except Exception as e:
        return self.handle_exception(e)
def douyinDetail(self, ids):
    """Build play lines for a douyin room: lines are main/backup pulls, each
    offering flv/hls/lls format entries.

    ids: ['douyin', web_rid].
    """
    url = f'{self.hosts[ids[0]]}/webcast/room/web/enter/?aid=6383&app_name=douyin_web&live_id=1&device_platform=web&enter_from=web_live&web_rid={ids[1]}&room_id_str=&enter_source=&Room-Enter-User-Login-Ab=0&is_need_double_stream=false&cookie_enabled=true&screen_width=1980&screen_height=1080&browser_language=zh-CN&browser_platform=Win32&browser_name=Edge&browser_version=125.0.0.0'
    data = self.fetch(url, headers=self.dyheaders).json()
    try:
        room = data['data']['data'][0]
        vod = self.buildvod(
            vod_name=room['title'],
            vod_remarks=room['user_count_str'],
        )
        pull_data = room['stream_url']['live_core_sdk_data']['pull_data']
        resolution_data = pull_data['options']['qualities']
        stream_json = json.loads(pull_data['stream_data'])
        # Which pull types exist across any sdk_key entry?
        available_types = []
        if any('main' in entry for entry in stream_json['data'].values()):
            available_types.append('main')
        if any('backup' in entry for entry in stream_json['data'].values()):
            available_types.append('backup')
        plist = []
        for line_type in available_types:
            format_arrays = {'flv': [], 'hls': [], 'lls': []}
            for quality in sorted(resolution_data, key=lambda x: x['level'], reverse=True):
                sdk_key = quality['sdk_key']
                if sdk_key in stream_json['data'] and line_type in stream_json['data'][sdk_key]:
                    stream_info = stream_json['data'][sdk_key][line_type]
                    for fmt in ('flv', 'hls', 'lls'):
                        if stream_info.get(fmt):
                            format_arrays[fmt].extend([quality['name'], stream_info[fmt]])
            format_urls = [f"{fmt}${ids[0]}@@{self.e64(json.dumps(urls))}"
                           for fmt, urls in format_arrays.items() if urls]
            if format_urls:
                plist.append('#'.join(format_urls))
        names = ['线路1', '线路2'][:len(plist)]
        vod['vod_play_from'] = "$$$".join(names)
        vod['vod_play_url'] = "$$$".join(plist)
        return vod

    except Exception as e:
        return self.handle_exception(e)
def douyuDetail(self, ids):
    """Build the vod detail for a Douyu live room.

    ids: [site_key, room_id].  Fetches room metadata, extracts the
    obfuscated sign script, has it signed by an external service, then
    queries getH5Play once to enumerate CDNs and builds one play line
    per CDN.  Any failure falls through to handle_exception().
    """
    headers = self.gethr(0, zr=f'{self.hosts[ids[0]]}/{ids[1]}')
    try:
        # Room metadata: title / sub-category / streamer nickname.
        data = self.fetch(f'{self.hosts[ids[0]]}/betard/{ids[1]}', headers=headers).json()
        vname = data['room']['room_name']
        vod = self.buildvod(
            vod_name=vname,
            vod_remarks=data['room'].get('second_lvl_name'),
            vod_director=data['room'].get('nickname'),
        )
        # homeH5Enc returns the obfuscated JS used to compute the play sign.
        vdata = self.fetch(f'{self.hosts[ids[0]]}/swf_api/homeH5Enc?rids={ids[1]}', headers=headers).json()
        json_body = vdata['data']
        json_body = {"html": self.douyu_text(json_body[f'room{ids[1]}']), "rid": ids[1]}
        # External signing service evaluates the trimmed script for us.
        sign = self.post('http://alive.nsapps.cn/api/AllLive/DouyuSign', json=json_body, headers=self.headers[1]).json()['data']
        body = f'{sign}&cdn=&rate=-1&ver=Douyu_223061205&iar=1&ive=1&hevc=0&fa=0'
        body=self.params_to_json(body)
        nubdata = self.post(f'{self.hosts[ids[0]]}/lapi/live/getH5Play/{ids[1]}', data=body, headers=headers).json()
        plist = []
        names = []
        # One play "line" per available CDN; the payload packs
        # sign/cdn/room-id plus the full rate table, both base64-encoded
        # for playerContent to unpack later.
        for i,x in enumerate(nubdata['data']['cdnsWithName']):
            names.append(f'线路{i+1}')
            d = {'sign': sign, 'cdn': x['cdn'], 'id': ids[1]}
            plist.append(
                f'{vname}${ids[0]}@@{self.e64(json.dumps(d))}@@{self.e64(json.dumps(nubdata["data"]["multirates"]))}')
        vod['vod_play_from'] = "$$$".join(names)
        vod['vod_play_url'] = "$$$".join(plist)
        return vod
    except Exception as e:
        return self.handle_exception(e)
|
||||
|
||||
def douyu_text(self, text):
    """Trim Douyu's obfuscated homeH5Enc javascript to its sign body.

    Cuts the source just before the "middle" occurrence of the keyword
    `function`, drops the first four characters, and rewrites the
    trailing ``eval(strc)(...)`` call into a bare ``strc`` reference so
    the snippet can be evaluated for its sign value.  Returns the input
    unchanged when no cut point exists (no `function` keywords).
    """
    starts = [m.start() for m in re.finditer('function', text)]
    count = len(starts)
    # The original's even/odd branches both reduce to floor(count/2)+1.
    middle = count // 2 + 1
    if count < middle:
        return text
    clipped = text[4:starts[middle - 1]]
    return re.sub(r'eval\(strc\)\([\w\d,]+\)', 'strc', clipped)
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
    """Search is not supported by this live-stream source."""
    pass
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
    """Resolve a packed play id into a direct stream url.

    `id` packs ``platform@@payload``.  Platforms whose payload already
    is a name/url list (wangyi, douyin, huya) are base64-decoded
    directly; bilibili and douyu need a further resolve step.  Any
    failure (including an unknown platform leaving `url` unbound)
    returns the parse=1 fallback url.

    Fix: the original had an ``elif ids[0] == 'huya'`` branch that was
    unreachable — 'huya' is already matched by the membership test
    below, which does the identical decode.  The dead branch is removed.
    """
    try:
        ids = id.split('@@')
        p = 1
        if ids[0] in ['wangyi', 'douyin', 'huya']:
            # Payload is already a JSON name/url list.
            p, url = 0, json.loads(self.d64(ids[1]))
        elif ids[0] == 'bili':
            p, url = self.biliplay(ids)
        elif ids[0] == 'douyu':
            p, url = self.douyuplay(ids)
        return {'parse': p, 'url': url, 'header': self.playheaders[ids[0]]}
    except Exception as e:
        return {'parse': 1, 'url': self.excepturl, 'header': self.headers[0]}
|
||||
|
||||
def biliplay(self, ids):
    """Resolve direct play urls for a bilibili live room.

    ids: [site_key, room_id, quality].  Walks every
    stream -> format -> codec -> url_info combination returned by
    getRoomPlayInfo and flattens them into a [name, url, name, url, ...]
    list.  Returns (0, urls) on success, (1, excepturl) on any failure.
    """
    try:
        data = self.fetch(
            f'{self.hosts[ids[0]][0]}/xlive/web-room/v2/index/getRoomPlayInfo?room_id={ids[1]}&protocol=0,1&format=0,2&codec=0&platform=web&qn={ids[2]}',
            headers=self.gethr(0, ids[0])).json()
        urls = []
        line_index = 1
        for stream in data['data']['playurl_info']['playurl']['stream']:
            for format_item in stream['format']:
                for codec in format_item['codec']:
                    for url_info in codec['url_info']:
                        # host + base_url + extra (query string) form the full url.
                        full_url = f"{url_info['host']}/{codec['base_url'].lstrip('/')}{url_info['extra']}"
                        urls.extend([f"线路{line_index}", full_url])
                        line_index += 1
        return 0, urls
    except Exception as e:
        return 1, self.excepturl
|
||||
|
||||
def douyuplay(self, ids):
    """Resolve Douyu play urls for every advertised quality.

    ids: [site_key, b64(sign/cdn/room dict), b64(rate table)].  Each
    quality is fetched concurrently by douyufp(), which groups results
    in `result_obj` keyed by bitrate; results are then flattened with
    the highest bitrate first.  Returns (0, urls) or (1, excepturl).
    NOTE(review): relies on ThreadPoolExecutor being imported at module
    level — confirm against the file header (not visible in this chunk).
    """
    try:
        sdata = json.loads(self.d64(ids[1]))
        headers = self.gethr(0, zr=f'{self.hosts[ids[0]]}/{sdata["id"]}')
        ldata = json.loads(self.d64(ids[2]))
        result_obj = {}
        with ThreadPoolExecutor(max_workers=len(ldata)) as executor:
            futures = [
                executor.submit(
                    self.douyufp,
                    sdata,
                    quality,
                    headers,
                    self.hosts[ids[0]],
                    result_obj
                ) for quality in ldata
            ]
            # Wait for every worker; result() also re-raises worker errors,
            # which are swallowed inside douyufp itself.
            for future in futures:
                future.result()

        # Flatten grouped [name, url] pairs, highest bitrate first.
        result = []
        for bit in sorted(result_obj.keys(), reverse=True):
            result.extend(result_obj[bit])

        if result:
            return 0, result
        return 1, self.excepturl

    except Exception as e:
        return 1, self.excepturl
|
||||
|
||||
def douyufp(self, sdata, quality, headers, host, result_obj):
    """Worker for douyuplay(): fetch the play url of one quality.

    Appends [quality name, url] into `result_obj` under the quality's
    bitrate key.  `result_obj` is shared across worker threads.
    NOTE(review): no lock is taken around the shared dict — relies on
    the GIL making these mutations safe; confirm acceptable.
    Errors are logged and swallowed so one failed quality does not
    abort the batch.
    """
    try:
        body = f'{sdata["sign"]}&cdn={sdata["cdn"]}&rate={quality["rate"]}'
        body=self.params_to_json(body)
        data = self.post(f'{host}/lapi/live/getH5Play/{sdata["id"]}',
                         data=body, headers=headers).json()
        if data.get('data'):
            play_url = data['data']['rtmp_url'] + '/' + data['data']['rtmp_live']
            bit = quality.get('bit', 0)
            if bit not in result_obj:
                result_obj[bit] = []
            result_obj[bit].extend([quality['name'], play_url])
    except Exception as e:
        print(f"Error fetching {quality['name']}: {str(e)}")
|
||||
|
||||
def localProxy(self, param):
    """Local proxying is not used by this source."""
    pass
|
||||
|
||||
def e64(self, text):
    """UTF-8 encode *text* and return its base64 string ('' on failure)."""
    try:
        return b64encode(text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64编码错误: {str(e)}")
        return ""
|
||||
|
||||
def d64(self, encoded_text):
    """Decode a base64 string back to utf-8 text ('' on failure)."""
    try:
        return b64decode(encoded_text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64解码错误: {str(e)}")
        return ""
|
||||
|
||||
def josn_to_params(self, params, skip_empty=False):
    """Serialise a dict into an & -joined query string.

    No url-encoding is applied — values are interpolated verbatim.
    With skip_empty=True, falsy values (including 0) are dropped,
    matching the original's truthiness test.
    """
    pairs = (
        f"{k}={v}"
        for k, v in params.items()
        if not (skip_empty and not v)
    )
    return "&".join(pairs)
|
||||
|
||||
def params_to_json(self, query_string):
    """Parse a query string into a flat dict.

    Keeps only the first value of repeated keys; keys with blank values
    are dropped (parse_qs default behaviour, preserved deliberately).
    """
    return {key: values[0] for key, values in parse_qs(query_string).items()}
|
||||
|
||||
def buildvod(self, vod_id='', vod_name='', vod_pic='', vod_year='', vod_tag='', vod_remarks='', style='',
             type_name='', vod_area='', vod_actor='', vod_director='',
             vod_content='', vod_play_from='', vod_play_url=''):
    """Assemble a vod dict from keyword fields.

    Any truthy vod_tag is normalised to the literal 'folder'; every
    falsy field is dropped from the result so the client only sees the
    keys that were actually provided.
    """
    fields = [
        ('vod_id', vod_id),
        ('vod_name', vod_name),
        ('vod_pic', vod_pic),
        ('vod_year', vod_year),
        ('vod_tag', 'folder' if vod_tag else ''),
        ('vod_remarks', vod_remarks),
        ('style', style),
        ('type_name', type_name),
        ('vod_area', vod_area),
        ('vod_actor', vod_actor),
        ('vod_director', vod_director),
        ('vod_content', vod_content),
        ('vod_play_from', vod_play_from),
        ('vod_play_url', vod_play_url),
    ]
    return {name: value for name, value in fields if value}
|
||||
|
||||
def getpq(self, url, headers=None, cookies=None):
    """Fetch *url* and parse the response body with PyQuery.

    Falls back to parsing the utf-8 encoded bytes when pyquery rejects
    the decoded text (e.g. declared-encoding conflicts in the document).
    """
    data = self.fetch(url, headers=headers, cookies=cookies).text
    try:
        return pq(data)
    except Exception as e:
        print(f"解析页面错误: {str(e)}")
        return pq(data.encode('utf-8'))
|
||||
|
||||
def gethr(self, index, rf='', zr=''):
    """Return request headers with a referer applied.

    index selects the base header template; an explicit referer `zr`
    wins, otherwise one is built from the referers table keyed by `rf`.

    Fix: the original aliased the shared template (``headers =
    self.headers[index]``) and mutated it, so a referer set for one
    request leaked into every later direct use of ``self.headers``.
    A shallow copy isolates each call.
    """
    headers = dict(self.headers[index])
    headers['referer'] = zr if zr else f"{self.referers[rf]}/"
    return headers
|
||||
|
||||
def handle_exception(self, e):
    """Log the error and return a placeholder 'crashed' vod entry."""
    print(f"报错: {str(e)}")
    fallback_url = f'翻车啦${self.excepturl}'
    return {'vod_play_from': '哎呀翻车啦', 'vod_play_url': fallback_url}
|
||||
|
159
PY/LREEOK.py
Normal file
159
PY/LREEOK.py
Normal file
@ -0,0 +1,159 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
# 温馨提示:官方APP数据是错误的,你们可以给官方反馈,然后就可以写APP
|
||||
import re
|
||||
import sys
|
||||
from Crypto.Hash import MD5
|
||||
sys.path.append("..")
|
||||
import json
|
||||
import time
|
||||
from pyquery import PyQuery as pq
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
|
||||
pass
|
||||
|
||||
def getName(self):
|
||||
pass
|
||||
|
||||
def isVideoFormat(self, url):
|
||||
pass
|
||||
|
||||
def manualVideoCheck(self):
|
||||
pass
|
||||
|
||||
def action(self, action):
|
||||
pass
|
||||
|
||||
def destroy(self):
|
||||
pass
|
||||
|
||||
host = 'https://www.lreeok.vip'
|
||||
|
||||
headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
|
||||
'Accept': 'application/json, text/javascript, */*; q=0.01',
|
||||
'sec-ch-ua-platform': '"macOS"',
|
||||
'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
|
||||
'Origin': host,
|
||||
'Referer': f"{host}/",
|
||||
}
|
||||
|
||||
def homeContent(self, filter):
    """Home page: derive categories from nav links and list featured videos."""
    data = self.getpq(self.fetch(self.host, headers=self.headers).text)
    result = {}
    classes = []
    for k in data('.head-more.box a').items():
        i = k.attr('href')
        # Only /vodNN links are real categories; the numeric part is the id.
        if i and '/vod' in i:
            classes.append({
                'type_name': k.text(),
                'type_id': re.search(r'\d+', i).group(0)
            })
    result['class'] = classes
    result['list'] = self.getlist(data('.border-box.diy-center .public-list-div'))
    return result
|
||||
|
||||
def homeVideoContent(self):
|
||||
pass
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
    """Category listing via the site's signed JSON API.

    Totals are unknown up-front, so pagecount/total are large sentinels
    and the client keeps paging until the list comes back empty.
    """
    body = {'type': tid, 'class': '', 'area': '', 'lang': '', 'version': '', 'state': '', 'letter': '', 'page': pg}
    data = self.post(f"{self.host}/index.php/api/vod", headers=self.headers, data=self.getbody(body)).json()
    result = {}
    result['list'] = data['list']
    result['page'] = pg
    result['pagecount'] = 9999
    result['limit'] = 90
    result['total'] = 999999
    return result
|
||||
|
||||
def detailContent(self, ids):
    """Scrape a detail page into one vod dict with play lines.

    The .slide-info blocks are read positionally (0=status, 2=director,
    3=actors, -1=year) — NOTE(review): inferred from the index use only;
    confirm against the live page layout.
    """
    data = self.getpq(self.fetch(f"{self.host}/voddetail/{ids[0]}.html", headers=self.headers).text)
    v = data('.detail-info.lightSpeedIn .slide-info')
    vod = {
        'vod_year': v.eq(-1).text(),
        'vod_remarks': v.eq(0).text(),
        'vod_actor': v.eq(3).text(),
        'vod_director': v.eq(2).text(),
        'vod_content': data('.switch-box #height_limit').text()
    }
    np = data('.anthology.wow.fadeInUp')
    ndata = np('.anthology-tab .swiper-wrapper .swiper-slide')
    pdata = np('.anthology-list .anthology-list-box ul')
    play, names = [], []
    # Source tabs and episode lists are parallel: tab i names the source
    # whose episodes live in list i.
    for i in range(len(ndata)):
        n = ndata.eq(i)('a')
        # Strip the episode-count badge so only the source name remains.
        n('span').remove()
        names.append(n.text())
        vs = []
        for v in pdata.eq(i)('li').items():
            vs.append(f"{v.text()}${v('a').attr('href')}")
        play.append('#'.join(vs))
    vod["vod_play_from"] = "$$$".join(names)
    vod["vod_play_url"] = "$$$".join(play)
    result = {"list": [vod]}
    return result
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
    """Search via the site's path-style search url (page in the path)."""
    data = self.getpq(self.fetch(f"{self.host}/vodsearch/{key}----------{pg}---.html", headers=self.headers).text)
    return {'list': self.getlist(data('.row-right .search-box .public-list-bj')), 'page': pg}
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
    """Resolve an episode path to a playable url.

    Reads the inline player config (the `aaa=` JSON) from the episode
    page; urls that are not already .m3u8/.mp4 are resolved through the
    signed okplay api.  parse stays 1 (webview fallback on the page url)
    when extraction fails.
    NOTE(review): when the config url is already .m3u8/.mp4, `url` is
    left as the page url rather than the direct link — confirm intended.
    """
    h, p = {"User-Agent": "okhttp/3.14.9"}, 1
    url = f"{self.host}{id}"
    data = self.getpq(self.fetch(url, headers=self.headers).text)
    try:
        jstr = data('.player .player-left script').eq(0).text()
        jsdata = json.loads(jstr.split('aaa=')[-1])
        body = {'url': jsdata['url']}
        if not re.search(r'\.m3u8|\.mp4', body['url']):
            data = self.post(f"{self.host}/okplay/api_config.php", headers=self.headers,
                             data=self.getbody(body)).json()
            url = data.get('url') or data.get('data', {}).get('url')
        p = 0
    except Exception as e:
        print('错误信息:', e)
        pass
    result = {}
    result["parse"] = p
    result["url"] = url
    result["header"] = h
    return result
|
||||
|
||||
def localProxy(self, param):
|
||||
pass
|
||||
|
||||
def getbody(self, params):
    """Sign a request body for the site's API.

    Appends the current unix time and an MD5 signature of
    "DS{time}DCC147D11943AF75" (site-specific salt).
    NOTE(review): mutates the caller's dict in place and returns it.
    """
    t = int(time.time())
    h = MD5.new()
    h.update(f"DS{t}DCC147D11943AF75".encode('utf-8'))
    key = h.hexdigest()
    params.update({'time': t, 'key': key})
    return params
|
||||
|
||||
def getlist(self, data):
    """Map thumbnail nodes to vod dicts; the numeric href part is the id."""
    videos = []
    for i in data.items():
        id = i('a').attr('href')
        if id:
            id = re.search(r'\d+', id).group(0)
            img = i('img').attr('data-src')
            # Proxy-style image paths (containing 'url=') are site-relative.
            if img and 'url=' in img: img = f'{self.host}{img}'
            videos.append({
                'vod_id': id,
                'vod_name': i('img').attr('alt'),
                'vod_pic': img,
                'vod_remarks': i('.public-prt').text() or i('.public-list-prb').text()
            })
    return videos
|
||||
|
||||
def getpq(self, data):
    """Parse html with PyQuery, retrying with utf-8 bytes on failure."""
    try:
        return pq(data)
    except Exception as e:
        print(f"{str(e)}")
        return pq(data.encode('utf-8'))
|
268
PY/Phb.py
Normal file
268
PY/Phb.py
Normal file
@ -0,0 +1,268 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from pyquery import PyQuery as pq
|
||||
from base64 import b64decode, b64encode
|
||||
from requests import Session
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
|
||||
self.host=self.gethost()
|
||||
self.headers['referer']=f'{self.host}/'
|
||||
self.session = Session()
|
||||
self.session.headers.update(self.headers)
|
||||
pass
|
||||
|
||||
def getName(self):
|
||||
pass
|
||||
|
||||
def isVideoFormat(self, url):
|
||||
pass
|
||||
|
||||
def manualVideoCheck(self):
|
||||
pass
|
||||
|
||||
def destroy(self):
|
||||
pass
|
||||
|
||||
headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
|
||||
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
|
||||
'sec-ch-ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
|
||||
'sec-ch-ua-mobile': '?0',
|
||||
'sec-ch-ua-full-version': '"133.0.6943.98"',
|
||||
'sec-ch-ua-arch': '"x86"',
|
||||
'sec-ch-ua-platform': '"Windows"',
|
||||
'sec-ch-ua-platform-version': '"19.0.0"',
|
||||
'sec-ch-ua-model': '""',
|
||||
'sec-ch-ua-full-version-list': '"Not(A:Brand";v="99.0.0.0", "Google Chrome";v="133.0.6943.98", "Chromium";v="133.0.6943.98"',
|
||||
'dnt': '1',
|
||||
'upgrade-insecure-requests': '1',
|
||||
'sec-fetch-site': 'none',
|
||||
'sec-fetch-mode': 'navigate',
|
||||
'sec-fetch-user': '?1',
|
||||
'sec-fetch-dest': 'document',
|
||||
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
|
||||
'priority': 'u=0, i'
|
||||
}
|
||||
|
||||
def homeContent(self, filter):
|
||||
result = {}
|
||||
cateManual = {
|
||||
"视频": "/video",
|
||||
"片单": "/playlists",
|
||||
"频道": "/channels",
|
||||
"分类": "/categories",
|
||||
"明星": "/pornstars"
|
||||
}
|
||||
classes = []
|
||||
filters = {}
|
||||
for k in cateManual:
|
||||
classes.append({
|
||||
'type_name': k,
|
||||
'type_id': cateManual[k]
|
||||
})
|
||||
result['class'] = classes
|
||||
result['filters'] = filters
|
||||
return result
|
||||
|
||||
def homeVideoContent(self):
|
||||
data = self.getpq('/recommended')
|
||||
vhtml = data("#recommendedListings .pcVideoListItem .phimage")
|
||||
return {'list':self.getlist(vhtml)}
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
|
||||
vdata = []
|
||||
result = {}
|
||||
result['page'] = pg
|
||||
result['pagecount'] = 9999
|
||||
result['limit'] = 90
|
||||
result['total'] = 999999
|
||||
if tid=='/video' or '_this_video' in tid:
|
||||
pagestr = f'&' if '?' in tid else f'?'
|
||||
tid=tid.split('_this_video')[0]
|
||||
data=self.getpq(f'{tid}{pagestr}page={pg}')
|
||||
vdata=self.getlist(data('#videoCategory .pcVideoListItem'))
|
||||
elif tid == '/playlists':
|
||||
data=self.getpq(f'{tid}?page={pg}')
|
||||
vhtml=data('#playListSection li')
|
||||
vdata = []
|
||||
for i in vhtml.items():
|
||||
vdata.append({
|
||||
'vod_id': 'playlists_click_' + i('.thumbnail-info-wrapper .display-block a').attr('href'),
|
||||
'vod_name': i('.thumbnail-info-wrapper .display-block a').attr('title'),
|
||||
'vod_pic': i('.largeThumb').attr('src'),
|
||||
'vod_tag': 'folder',
|
||||
'vod_remarks': i('.playlist-videos .number').text(),
|
||||
'style': {"type": "rect", "ratio": 1.33}
|
||||
})
|
||||
elif tid=='/channels':
|
||||
data=self.getpq(f'{tid}?o=rk&page={pg}')
|
||||
vhtml=data('#filterChannelsSection li .description')
|
||||
vdata=[]
|
||||
for i in vhtml.items():
|
||||
vdata.append({
|
||||
'vod_id': 'director_click_'+i('.avatar a').attr('href'),
|
||||
'vod_name': i('.avatar img').attr('alt'),
|
||||
'vod_pic': i('.avatar img').attr('src'),
|
||||
'vod_tag':'folder',
|
||||
'vod_remarks': i('.descriptionContainer ul li').eq(-1).text(),
|
||||
'style':{"type": "rect", "ratio": 1.33}
|
||||
})
|
||||
elif tid=='/categories' and pg=='1':
|
||||
result['pagecount'] = 1
|
||||
data=self.getpq(f'{tid}')
|
||||
vhtml=data('.categoriesListSection li .relativeWrapper')
|
||||
vdata=[]
|
||||
for i in vhtml.items():
|
||||
vdata.append({
|
||||
'vod_id': i('a').attr('href')+'_this_video',
|
||||
'vod_name': i('a').attr('alt'),
|
||||
'vod_pic': i('a img').attr('src'),
|
||||
'vod_tag':'folder',
|
||||
'style':{"type": "rect", "ratio": 1.33}
|
||||
})
|
||||
elif tid=='/pornstars':
|
||||
data=self.getpq(f'{tid}?o=t&page={pg}')
|
||||
vhtml=data('#popularPornstars .performerCard .wrap')
|
||||
vdata=[]
|
||||
for i in vhtml.items():
|
||||
vdata.append({
|
||||
'vod_id': 'pornstars_click_'+i('a').attr('href'),
|
||||
'vod_name': i('.performerCardName').text(),
|
||||
'vod_pic': i('a img').attr('src'),
|
||||
'vod_tag':'folder',
|
||||
'vod_year':i('.performerVideosViewsCount span').eq(0).text(),
|
||||
'vod_remarks': i('.performerVideosViewsCount span').eq(-1).text(),
|
||||
'style':{"type": "rect", "ratio": 1.33}
|
||||
})
|
||||
elif 'playlists_click' in tid:
|
||||
tid=tid.split('click_')[-1]
|
||||
if pg=='1':
|
||||
hdata=self.getpq(tid)
|
||||
self.token=hdata('#searchInput').attr('data-token')
|
||||
vdata = self.getlist(hdata('#videoPlaylist .pcVideoListItem .phimage'))
|
||||
else:
|
||||
tid=tid.split('playlist/')[-1]
|
||||
data=self.getpq(f'/playlist/viewChunked?id={tid}&token={self.token}&page={pg}')
|
||||
vdata=self.getlist(data('.pcVideoListItem .phimage'))
|
||||
elif 'director_click' in tid:
|
||||
tid=tid.split('click_')[-1]
|
||||
data=self.getpq(f'{tid}/videos?page={pg}')
|
||||
vdata=self.getlist(data('#showAllChanelVideos .pcVideoListItem .phimage'))
|
||||
elif 'pornstars_click' in tid:
|
||||
tid=tid.split('click_')[-1]
|
||||
data=self.getpq(f'{tid}/videos?page={pg}')
|
||||
vdata=self.getlist(data('#mostRecentVideosSection .pcVideoListItem .phimage'))
|
||||
result['list'] = vdata
|
||||
return result
|
||||
|
||||
def detailContent(self, ids):
|
||||
url = f"{self.host}{ids[0]}"
|
||||
data = self.getpq(ids[0])
|
||||
vn=data('meta[property="og:title"]').attr('content')
|
||||
dtext=data('.userInfo .usernameWrap a')
|
||||
pdtitle = '[a=cr:' + json.dumps({'id': 'director_click_'+dtext.attr('href'), 'name': dtext.text()}) + '/]' + dtext.text() + '[/a]'
|
||||
vod = {
|
||||
'vod_name': vn,
|
||||
'vod_director':pdtitle,
|
||||
'vod_remarks': (data('.userInfo').text()+' / '+data('.ratingInfo').text()).replace('\n',' / '),
|
||||
'vod_play_from': 'Pornhub',
|
||||
'vod_play_url': ''
|
||||
}
|
||||
js_content = data("#player script").eq(0).text()
|
||||
plist = [f"{vn}${self.e64(f'{1}@@@@{url}')}"]
|
||||
try:
|
||||
pattern = r'"mediaDefinitions":\s*(\[.*?\]),\s*"isVertical"'
|
||||
match = re.search(pattern, js_content, re.DOTALL)
|
||||
if match:
|
||||
json_str = match.group(1)
|
||||
udata = json.loads(json_str)
|
||||
plist = [
|
||||
f"{media['height']}${self.e64(f'{0}@@@@{url}')}"
|
||||
for media in udata[:-1]
|
||||
if (url := media.get('videoUrl'))
|
||||
]
|
||||
except Exception as e:
|
||||
print(f"提取mediaDefinitions失败: {str(e)}")
|
||||
vod['vod_play_url'] = '#'.join(plist)
|
||||
return {'list':[vod]}
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
|
||||
data=self.getpq(f'/video/search?search={key}&page={pg}')
|
||||
return {'list':self.getlist(data('#videoSearchResult .pcVideoListItem .phimage'))}
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
|
||||
headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5410.0 Safari/537.36',
|
||||
'pragma': 'no-cache',
|
||||
'cache-control': 'no-cache',
|
||||
'sec-ch-ua-platform': '"Windows"',
|
||||
'sec-ch-ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
|
||||
'dnt': '1',
|
||||
'sec-ch-ua-mobile': '?0',
|
||||
'origin': self.host,
|
||||
'sec-fetch-site': 'cross-site',
|
||||
'sec-fetch-mode': 'cors',
|
||||
'sec-fetch-dest': 'empty',
|
||||
'referer': f'{self.host}/',
|
||||
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
|
||||
'priority': 'u=1, i',
|
||||
}
|
||||
ids=self.d64(id).split('@@@@')
|
||||
return {'parse': int(ids[0]), 'url': ids[1], 'header': headers}
|
||||
|
||||
def localProxy(self, param):
|
||||
pass
|
||||
|
||||
def gethost(self):
    """Discover the regional host by following the first redirect.

    The Location header ends with '/', which is stripped; falls back to
    the canonical host when the probe fails.
    """
    try:
        response = self.fetch('https://www.pornhub.com',headers=self.headers,allow_redirects=False)
        return response.headers['Location'][:-1]
    except Exception as e:
        print(f"获取主页失败: {str(e)}")
        return "https://www.pornhub.com"
|
||||
|
||||
def e64(self, text):
    """UTF-8 encode *text* and return its base64 string ('' on failure)."""
    try:
        return b64encode(text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64编码错误: {str(e)}")
        return ""
|
||||
|
||||
def d64(self,encoded_text):
    """Decode a base64 string back to utf-8 text ('' on failure)."""
    try:
        return b64decode(encoded_text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64解码错误: {str(e)}")
        return ""
|
||||
|
||||
def getlist(self, data):
    """Map .phimage thumbnail nodes to vod dicts (16:9 rect cards)."""
    vlist=[]
    for i in data.items():
        vlist.append({
            'vod_id': i('a').attr('href'),
            'vod_name': i('a').attr('title'),
            'vod_pic': i('img').attr('src'),
            # Quality badge when present, otherwise the clip duration.
            'vod_remarks': i('.bgShadeEffect').text() or i('.duration').text(),
            'style': {'ratio': 1.33, 'type': 'rect'}
        })
    return vlist
|
||||
|
||||
def getpq(self, path):
    """GET self.host+path with the shared session and parse with PyQuery.

    NOTE(review): returns None on failure, so callers that immediately
    do data(...) will raise TypeError — consider re-raising instead.
    """
    try:
        response = self.session.get(f'{self.host}{path}').text
        return pq(response.encode('utf-8'))
    except Exception as e:
        print(f"请求失败: , {str(e)}")
        return None
|
270
PY/Xhm.py
Normal file
270
PY/Xhm.py
Normal file
@ -0,0 +1,270 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import json
|
||||
import sys
|
||||
from base64 import b64decode, b64encode
|
||||
from pyquery import PyQuery as pq
|
||||
from requests import Session
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
|
||||
self.host = self.gethost()
|
||||
self.headers['referer'] = f'{self.host}/'
|
||||
self.session = Session()
|
||||
self.session.headers.update(self.headers)
|
||||
pass
|
||||
|
||||
def getName(self):
|
||||
pass
|
||||
|
||||
def isVideoFormat(self, url):
|
||||
pass
|
||||
|
||||
def manualVideoCheck(self):
|
||||
pass
|
||||
|
||||
def destroy(self):
|
||||
pass
|
||||
|
||||
headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
|
||||
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
|
||||
'sec-ch-ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
|
||||
'sec-ch-ua-mobile': '?0',
|
||||
'sec-ch-ua-full-version': '"133.0.6943.98"',
|
||||
'sec-ch-ua-arch': '"x86"',
|
||||
'sec-ch-ua-platform': '"Windows"',
|
||||
'sec-ch-ua-platform-version': '"19.0.0"',
|
||||
'sec-ch-ua-model': '""',
|
||||
'sec-ch-ua-full-version-list': '"Not(A:Brand";v="99.0.0.0", "Google Chrome";v="133.0.6943.98", "Chromium";v="133.0.6943.98"',
|
||||
'dnt': '1',
|
||||
'upgrade-insecure-requests': '1',
|
||||
'sec-fetch-site': 'none',
|
||||
'sec-fetch-mode': 'navigate',
|
||||
'sec-fetch-user': '?1',
|
||||
'sec-fetch-dest': 'document',
|
||||
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
|
||||
'priority': 'u=0, i'
|
||||
}
|
||||
|
||||
def homeContent(self, filter):
|
||||
result = {}
|
||||
cateManual = {
|
||||
"4K": "/4k",
|
||||
"国产": "two_click_/categories/chinese",
|
||||
"最新": "/newest",
|
||||
"最佳": "/best",
|
||||
"频道": "/channels",
|
||||
"类别": "/categories",
|
||||
"明星": "/pornstars"
|
||||
}
|
||||
classes = []
|
||||
filters = {}
|
||||
for k in cateManual:
|
||||
classes.append({
|
||||
'type_name': k,
|
||||
'type_id': cateManual[k]
|
||||
})
|
||||
if k !='4K':filters[cateManual[k]]=[{'key':'type','name':'类型','value':[{'n':'4K','v':'/4k'}]}]
|
||||
result['class'] = classes
|
||||
result['filters'] = filters
|
||||
return result
|
||||
|
||||
def homeVideoContent(self):
|
||||
data = self.getpq()
|
||||
return {'list': self.getlist(data(".thumb-list--sidebar .thumb-list__item"))}
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
|
||||
vdata = []
|
||||
result = {}
|
||||
result['page'] = pg
|
||||
result['pagecount'] = 9999
|
||||
result['limit'] = 90
|
||||
result['total'] = 999999
|
||||
if tid in ['/4k', '/newest', '/best'] or 'two_click_' in tid:
|
||||
if 'two_click_' in tid: tid = tid.split('click_')[-1]
|
||||
data = self.getpq(f'{tid}{extend.get("type","")}/{pg}')
|
||||
vdata = self.getlist(data(".thumb-list--sidebar .thumb-list__item"))
|
||||
elif tid == '/channels':
|
||||
data = self.getpq(f'{tid}/{pg}')
|
||||
jsdata = self.getjsdata(data)
|
||||
for i in jsdata['channels']:
|
||||
vdata.append({
|
||||
'vod_id': f"two_click_" + i.get('channelURL'),
|
||||
'vod_name': i.get('channelName'),
|
||||
'vod_pic': i.get('siteLogoURL'),
|
||||
'vod_year': f'videos:{i.get("videoCount")}',
|
||||
'vod_tag': 'folder',
|
||||
'vod_remarks': f'subscribers:{i["subscriptionModel"].get("subscribers")}',
|
||||
'style': {'ratio': 1.33, 'type': 'rect'}
|
||||
})
|
||||
elif tid == '/categories':
|
||||
result['pagecount'] = pg
|
||||
data = self.getpq(tid)
|
||||
self.cdata = self.getjsdata(data)
|
||||
for i in self.cdata['layoutPage']['store']['popular']['assignable']:
|
||||
vdata.append({
|
||||
'vod_id': "one_click_" + i.get('id'),
|
||||
'vod_name': i.get('name'),
|
||||
'vod_pic': '',
|
||||
'vod_tag': 'folder',
|
||||
'style': {'ratio': 1.33, 'type': 'rect'}
|
||||
})
|
||||
elif tid == '/pornstars':
|
||||
data = self.getpq(f'{tid}/{pg}')
|
||||
pdata = self.getjsdata(data)
|
||||
for i in pdata['pagesPornstarsComponent']['pornstarListProps']['pornstars']:
|
||||
vdata.append({
|
||||
'vod_id': f"two_click_" + i.get('pageURL'),
|
||||
'vod_name': i.get('name'),
|
||||
'vod_pic': i.get('imageThumbUrl'),
|
||||
'vod_remarks': i.get('translatedCountryName'),
|
||||
'vod_tag': 'folder',
|
||||
'style': {'ratio': 1.33, 'type': 'rect'}
|
||||
})
|
||||
elif 'one_click' in tid:
|
||||
result['pagecount'] = pg
|
||||
tid = tid.split('click_')[-1]
|
||||
for i in self.cdata['layoutPage']['store']['popular']['assignable']:
|
||||
if i.get('id') == tid:
|
||||
for j in i['items']:
|
||||
vdata.append({
|
||||
'vod_id': f"two_click_" + j.get('url'),
|
||||
'vod_name': j.get('name'),
|
||||
'vod_pic': j.get('thumb'),
|
||||
'vod_tag': 'folder',
|
||||
'style': {'ratio': 1.33, 'type': 'rect'}
|
||||
})
|
||||
result['list'] = vdata
|
||||
return result
|
||||
|
||||
def detailContent(self, ids):
|
||||
data = self.getpq(ids[0])
|
||||
djs = self.getjsdata(data)
|
||||
vn = data('meta[property="og:title"]').attr('content')
|
||||
dtext = data('#video-tags-list-container')
|
||||
href = dtext('a').attr('href')
|
||||
title = dtext('span[class*="body-bold-"]').eq(0).text()
|
||||
pdtitle = ''
|
||||
if href:
|
||||
pdtitle = '[a=cr:' + json.dumps({'id': 'two_click_' + href, 'name': title}) + '/]' + title + '[/a]'
|
||||
vod = {
|
||||
'vod_name': vn,
|
||||
'vod_director': pdtitle,
|
||||
'vod_remarks': data('.rb-new__info').text(),
|
||||
'vod_play_from': 'Xhamster',
|
||||
'vod_play_url': ''
|
||||
}
|
||||
try:
|
||||
plist = []
|
||||
d = djs['xplayerSettings']['sources']
|
||||
f = d.get('standard')
|
||||
def custom_sort_key(url):
|
||||
quality = url.split('$')[0]
|
||||
number = ''.join(filter(str.isdigit, quality))
|
||||
number = int(number) if number else 0
|
||||
return -number, quality
|
||||
|
||||
if f:
|
||||
for key, value in f.items():
|
||||
if isinstance(value, list):
|
||||
for info in value:
|
||||
id = self.e64(f'{0}@@@@{info.get("url") or info.get("fallback")}')
|
||||
plist.append(f"{info.get('label') or info.get('quality')}${id}")
|
||||
plist.sort(key=custom_sort_key)
|
||||
if d.get('hls'):
|
||||
for format_type, info in d['hls'].items():
|
||||
if url := info.get('url'):
|
||||
encoded = self.e64(f'{0}@@@@{url}')
|
||||
plist.append(f"{format_type}${encoded}")
|
||||
|
||||
except Exception as e:
|
||||
plist = [f"{vn}${self.e64(f'{1}@@@@{ids[0]}')}"]
|
||||
print(f"获取视频信息失败: {str(e)}")
|
||||
vod['vod_play_url'] = '#'.join(plist)
|
||||
return {'list': [vod]}
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
|
||||
data = self.getpq(f'/search/{key}?page={pg}')
|
||||
return {'list': self.getlist(data(".thumb-list--sidebar .thumb-list__item")), 'page': pg}
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
|
||||
headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5410.0 Safari/537.36',
|
||||
'pragma': 'no-cache',
|
||||
'cache-control': 'no-cache',
|
||||
'sec-ch-ua-platform': '"Windows"',
|
||||
'sec-ch-ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
|
||||
'dnt': '1',
|
||||
'sec-ch-ua-mobile': '?0',
|
||||
'origin': self.host,
|
||||
'sec-fetch-site': 'cross-site',
|
||||
'sec-fetch-mode': 'cors',
|
||||
'sec-fetch-dest': 'empty',
|
||||
'referer': f'{self.host}/',
|
||||
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
|
||||
'priority': 'u=1, i',
|
||||
}
|
||||
ids = self.d64(id).split('@@@@')
|
||||
return {'parse': int(ids[0]), 'url': ids[1], 'header': headers}
|
||||
|
||||
def localProxy(self, param):
|
||||
pass
|
||||
|
||||
def gethost(self):
    """Discover the regional xhamster host from the first redirect.

    Unlike the Pornhub variant, the Location header here is used as-is
    (no trailing-slash strip).  Falls back to a fixed mirror on failure.
    """
    try:
        response = self.fetch('https://xhamster.com', headers=self.headers, allow_redirects=False)
        return response.headers['Location']
    except Exception as e:
        print(f"获取主页失败: {str(e)}")
        return "https://zn.xhamster.com"
|
||||
|
||||
def e64(self, text):
    """UTF-8 encode *text* and return its base64 string ('' on failure)."""
    try:
        return b64encode(text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64编码错误: {str(e)}")
        return ""
|
||||
|
||||
def d64(self, encoded_text):
    """Decode a base64 string back to utf-8 text ('' on failure)."""
    try:
        return b64decode(encoded_text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64解码错误: {str(e)}")
        return ""
|
||||
|
||||
def getlist(self, data):
    """Map sidebar thumb-list items to vod dicts.

    The view count's first token is surfaced through the vod_year slot
    (the client has no dedicated field for it).
    """
    vlist = []
    for i in data.items():
        vlist.append({
            'vod_id': i('.role-pop').attr('href'),
            'vod_name': i('.video-thumb-info a').text(),
            'vod_pic': i('.role-pop img').attr('src'),
            'vod_year': i('.video-thumb-info .video-thumb-views').text().split(' ')[0],
            'vod_remarks': i('.role-pop div[data-role="video-duration"]').text(),
            'style': {'ratio': 1.33, 'type': 'rect'}
        })
    return vlist
|
||||
|
||||
def getpq(self, path=''):
    """Fetch a page (absolute URL or host-relative path) and parse it with pyquery.

    Retries the parse from UTF-8 bytes when the str parse fails (lxml rejects
    str input that carries an XML encoding declaration).
    """
    # Absolute URLs pass through; relative paths are joined to the resolved host.
    h = '' if path.startswith('http') else self.host
    response = self.session.get(f'{h}{path}').text
    try:
        return pq(response)
    except Exception as e:
        print(f"{str(e)}")
        return pq(response.encode('utf-8'))
|
||||
|
||||
def getjsdata(self, data):
    """Extract the JSON state object embedded in the page's `initials` script.

    Assumes the script body ends like `...initials=<json>;` — the final
    character (the trailing semicolon) is stripped before json.loads.
    """
    vhtml = data("script[id='initials-script']").text()
    jst = json.loads(vhtml.split('initials=')[-1][:-1])
    return jst
|
||||
|
260
PY/Xvd.py
Normal file
260
PY/Xvd.py
Normal file
@ -0,0 +1,260 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from pyquery import PyQuery as pq
|
||||
from base64 import b64decode, b64encode
|
||||
from requests import Session
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
def init(self, extend=""):
    """Prepare a shared requests Session carrying the spider's default headers."""
    # NOTE(review): this mutates the class-level headers dict; fine for a single
    # instance but shared across instances — confirm that is intended.
    self.headers['referer']=f'{self.host}/'
    self.session = Session()
    self.session.headers.update(self.headers)
    pass
|
||||
|
||||
def getName(self):
    """Spider interface stub; the display name is not used by this plugin."""
    pass

def isVideoFormat(self, url):
    """Spider interface stub."""
    pass

def manualVideoCheck(self):
    """Spider interface stub."""
    pass

def destroy(self):
    """Spider interface stub; no resources to release."""
    pass
|
||||
|
||||
# Target site and the browser-like request headers sent with every request.
host = "https://www.xvideos.com"

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'sec-ch-ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-full-version': '"133.0.6943.98"',
    'sec-ch-ua-arch': '"x86"',
    'sec-ch-ua-platform': '"Windows"',
    'sec-ch-ua-platform-version': '"19.0.0"',
    'sec-ch-ua-model': '""',
    'sec-ch-ua-full-version-list': '"Not(A:Brand";v="99.0.0.0", "Google Chrome";v="133.0.6943.98", "Chromium";v="133.0.6943.98"',
    'dnt': '1',
    'upgrade-insecure-requests': '1',
    'sec-fetch-site': 'none',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-user': '?1',
    'sec-fetch-dest': 'document',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
    'priority': 'u=0, i'
}
|
||||
|
||||
def homeContent(self, filter):
    """Return the fixed top-level categories shown on the home screen."""
    sections = [
        ("最新", "/new"),
        ("最佳", "/best"),
        ("频道", "/channels-index"),
        ("标签", "/tags"),
        ("明星", "/pornstars-index"),
    ]
    return {
        'class': [{'type_name': name, 'type_id': path} for name, path in sections]
    }
|
||||
|
||||
def homeVideoContent(self):
    """Scrape the landing page and list its video thumbnails."""
    data = self.getpq()
    return {'list':self.getlist(data(".mozaique .frame-block"))}
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
    """List a category page.

    `tid` is either one of the fixed section paths from homeContent, or a
    synthetic folder id ("tags_click_..." / "channels_click_...") produced by
    an earlier call, which is unwrapped back into a site path here.
    The site's pagination is zero-based, hence the `int(pg) - 1` offsets.
    """
    vdata = []
    result = {}
    # Zero-based page suffix; page 1 uses the bare path.
    page = f"/{int(pg) - 1}" if pg != '1' else ''
    result['page'] = pg
    result['pagecount'] = 9999
    result['limit'] = 90
    result['total'] = 999999
    if tid=='/new' or 'tags_click' in tid:
        # Tag folders carry their real path after the 'click_' marker.
        if 'tags_click' in tid:tid=tid.split('click_')[-1]
        data=self.getpq(f'{tid}/{pg}')
        vdata=self.getlist(data(".mozaique .frame-block"))
    elif tid=='/best':
        # /best redirects to a dated path; capture it once on page 1 and
        # reuse it (self.path) for subsequent pages.
        if pg=='1':
            self.path=self.session.get(f'{self.host}{tid}',headers=self.headers,allow_redirects=False).headers['Location']
        data=self.getpq(f'{self.path}{page}')
        vdata=self.getlist(data(".mozaique .frame-block"))
    elif tid=='/channels-index' or tid=='/pornstars-index':
        data = self.getpq(f'{tid}{page}')
        vhtml=data(".mozaique .thumb-block")
        for i in vhtml.items():
            a = i('.thumb-inside .thumb a')
            # Thumbnail URL is emitted by an inline script; pull the src attr out.
            match = re.search(r'src="([^"]+)"', a('script').text())
            img=''
            if match:
                img = match.group(1).strip()
            vdata.append({
                'vod_id': f"channels_click_{'/channels'if tid=='/channels-index' else ''}"+a.attr('href'),
                'vod_name': a('.profile-name').text() or i('.profile-name').text().replace('\xa0','/'),
                'vod_pic': img,
                'vod_tag': 'folder',
                'vod_remarks': i('.thumb-under .profile-counts').text(),
                'style': {'ratio': 1.33, 'type': 'rect'}
            })
    elif tid=='/tags':
        # The tag index is a single page; pin pagecount so the UI stops paging.
        result['pagecount'] = pg
        vhtml = self.getpq(tid)
        vhtml = vhtml('.tags-list')
        for d in vhtml.items():
            for i in d('li a').items():
                vdata.append({
                    'vod_id': "tags_click_"+i.attr('href'),
                    'vod_name': i.attr('title') or i('b').text(),
                    'vod_pic': '',
                    'vod_tag': 'folder',
                    'vod_remarks': i('.navbadge').text(),
                    'style': {'ratio': 1.33, 'type': 'rect'}
                })
    elif 'channels_click' in tid:
        # Channel folders are served by a JSON endpoint rather than HTML.
        tid=tid.split('click_')[-1]
        headers=self.session.headers.copy()
        headers.update({'Accept': 'application/json, text/javascript, */*; q=0.01'})
        vhtml=self.post(f'{self.host}{tid}/videos/best/{int(pg)-1}',headers=headers).json()
        for i in vhtml['videos']:
            vdata.append({
                'vod_id': i.get('u'),
                'vod_name': i.get('tf'),
                'vod_pic': i.get('il'),
                'vod_year': i.get('n'),
                'vod_remarks': i.get('d'),
                'style': {'ratio': 1.33, 'type': 'rect'}
            })
    result['list'] = vdata
    return result
|
||||
|
||||
def detailContent(self, ids):
    """Build the detail record for one video page.

    Extracts the title, the uploader link (rendered as a clickable
    channels folder), and the playable qualities from the inline
    html5player script. Play ids are e64("<parse>@@@@<url>") so
    playerContent can recover both fields.
    """
    url = f"{self.host}{ids[0]}"
    data = self.getpq(ids[0])
    vn=data('meta[property="og:title"]').attr('content')
    dtext=data('.main-uploader a')
    href=dtext.attr('href')
    pdtitle=''
    if href and href.count('/') < 2:
        # Bare uploader slugs live under /channels/<slug>.
        href=f'/channels{href}'
        # NOTE(review): reconstructed indentation — pdtitle is built inside
        # the guard so a missing href cannot crash; confirm against upstream.
        pdtitle = '[a=cr:' + json.dumps({'id': 'channels_click_'+href, 'name': dtext('.name').text()}) + '/]' + dtext('.name').text() + '[/a]'
    vod = {
        'vod_name': vn,
        'vod_director':pdtitle,
        'vod_remarks': data('.page-title').text().replace(vn,''),
        'vod_play_from': 'Xvideos',
        'vod_play_url': ''
    }
    # Find the inline player script that carries the setVideoUrl* calls.
    js_content = data("#video-player-bg script")
    jstr=''
    for script in js_content.items():
        content = script.text()
        if 'setVideoUrlLow' in content and 'html5player' in content:
            jstr = content
            break
    # Fallback: parse flag 1 (webview) with the page URL itself.
    plist = [f"{vn}${self.e64(f'{1}@@@@{url}')}"]
    def extract_video_urls(js_content):
        # Pull the low/high/HLS stream URLs out of the player script.
        try:
            low = re.search(r'setVideoUrlLow\([\'"]([^\'"]+)[\'"]\)', js_content)
            high = re.search(r'setVideoUrlHigh\([\'"]([^\'"]+)[\'"]\)', js_content)
            hls = re.search(r'setVideoHLS\([\'"]([^\'"]+)[\'"]\)', js_content)

            return {
                'hls': hls.group(1) if hls else None,
                'high': high.group(1) if high else None,
                'low': low.group(1) if low else None
            }
        except Exception as e:
            print(f"提取视频URL失败: {str(e)}")
            return {}
    if jstr:
        try:
            urls = extract_video_urls(jstr)
            # Direct-play entries: parse flag 0 with the raw stream URL.
            plist = [
                f"{quality}${self.e64(f'{0}@@@@{url}')}"
                for quality, url in urls.items()
                if url
            ]
        except Exception as e:
            print(f"提取url失败: {str(e)}")
    vod['vod_play_url'] = '#'.join(plist)
    return {'list':[vod]}
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
    """Search the site; the site's `p` parameter is zero-based."""
    data=self.getpq(f'/?k={key}&p={int(pg)-1}')
    return {'list':self.getlist(data(".mozaique .frame-block")),'page':pg}
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
|
||||
headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5410.0 Safari/537.36',
|
||||
'pragma': 'no-cache',
|
||||
'cache-control': 'no-cache',
|
||||
'sec-ch-ua-platform': '"Windows"',
|
||||
'sec-ch-ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
|
||||
'dnt': '1',
|
||||
'sec-ch-ua-mobile': '?0',
|
||||
'origin': self.host,
|
||||
'sec-fetch-site': 'cross-site',
|
||||
'sec-fetch-mode': 'cors',
|
||||
'sec-fetch-dest': 'empty',
|
||||
'referer': f'{self.host}/',
|
||||
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
|
||||
'priority': 'u=1, i',
|
||||
}
|
||||
ids=self.d64(id).split('@@@@')
|
||||
return {'parse': int(ids[0]), 'url': ids[1], 'header': headers}
|
||||
|
||||
def localProxy(self, param):
    """Local-proxy hook required by the Spider interface; unused in this spider."""
    pass
|
||||
|
||||
def e64(self, text):
    """Base64-encode a UTF-8 string; returns '' when encoding fails."""
    try:
        return b64encode(text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64编码错误: {str(e)}")
        return ""
|
||||
|
||||
def d64(self, encoded_text):
    """Base64-decode to a UTF-8 string; returns '' when decoding fails."""
    try:
        return b64decode(encoded_text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64解码错误: {str(e)}")
        return ""
|
||||
|
||||
def getlist(self, data):
    """Convert thumbnail card nodes into the vod dicts used by the list UI."""
    vlist = []
    for card in data.items():
        thumb = card('.thumb-inside .thumb a')
        title = card('.thumb-under .title a')
        vlist.append({
            'vod_id': thumb.attr('href'),
            'vod_name': title('a').attr('title'),
            'vod_pic': thumb('img').attr('data-src'),
            'vod_year': thumb('.video-hd-mark').text(),
            'vod_remarks': title('.duration').text(),
            'style': {'ratio': 1.33, 'type': 'rect'}
        })
    return vlist
|
||||
|
||||
def getpq(self, path=''):
    """Fetch a host-relative path and parse the HTML with pyquery.

    Retries the parse from UTF-8 bytes when the str parse fails (lxml rejects
    str input that carries an XML encoding declaration).
    """
    response = self.session.get(f'{self.host}{path}').text
    try:
        return pq(response)
    except Exception as e:
        print(f"{str(e)}")
        return pq(response.encode('utf-8'))
|
146
PY/hitvAPP.py
Normal file
146
PY/hitvAPP.py
Normal file
@ -0,0 +1,146 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import sys
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
import requests
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
    """Spider interface hook; this spider needs no per-instance setup."""
    pass
|
||||
|
||||
def getName(self):
    """Human-readable identifier for this spider."""
    return "hitv"

def isVideoFormat(self, url):
    """Spider interface stub."""
    pass

def manualVideoCheck(self):
    """Spider interface stub."""
    pass

def destroy(self):
    """Spider interface stub; no resources to release."""
    pass
|
||||
|
||||
def homeContent(self, filter):
    """Static category list: ranking plus the five numeric content types."""
    sections = [
        # ("直播", "live"),
        ('排行榜', 'rank'),
        ("电影", "1"),
        ("剧集", "2"),
        ("综艺", "3"),
        ("动画", "4"),
        ("短片", "5"),
    ]
    return {
        'class': [{'type_name': name, 'type_id': tid} for name, tid in sections]
    }
|
||||
|
||||
# API endpoint and the default request headers for every call.
host = "https://wys.upfuhn.com"
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/80.0.3987.149 Safari/537.36"
}
|
||||
|
||||
def list(self, list):
|
||||
videos = []
|
||||
for it in list:
|
||||
videos.append({
|
||||
"vod_id": it['video_site_id'],
|
||||
"vod_name": it['video_name'],
|
||||
"vod_pic": it['video_horizontal_url'] or it['video_vertical_url'],
|
||||
"vod_remarks": it['newest_series_num'],
|
||||
"vod_year": it['years'],
|
||||
})
|
||||
return videos
|
||||
|
||||
def homeVideoContent(self):
    """Fetch the 'hot' feed for the home screen."""
    url = f'{self.host}/v1/ys_video_sites/hot?t=1'
    data = requests.get(url, headers=self.headers).json()
    videos = self.list(data['data']['data'])
    result = {'list': videos}
    return result
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
    """List videos for a category or the ranking board.

    `pg` may arrive as str or int depending on the host app, so it is
    normalized via str() before the first-page check (the original
    compared `pg == 1`, which never matched when pg was the string '1').
    """
    path = f'/v1/ys_video_sites?t={tid}&s_t=0&a&y&o=0&ps=21&pn={pg}'
    rank = False
    if tid == 'rank':
        # The ranking endpoint has no pagination; only serve page 1.
        if str(pg) == '1':
            path = f'/v1/ys_video_sites/ranking'
            rank = True
        else:
            path = ''
    # elif tid == 'live' and pg == 1:
    #     path = f'/v1/ys_live_tvs'
    result = {}
    try:
        data = requests.get(self.host + path, headers=self.headers).json()
        if rank:
            # Ranking returns one list per board; flatten them all.
            videos = []
            for board in data['data']:
                videos.extend(data['data'][board])
        else:
            videos = data['data']['data']
        result['list'] = self.list(videos)
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
    except Exception:
        # Network/JSON failures degrade to an empty page instead of crashing.
        result['list'] = []
    return result
|
||||
|
||||
def detailContent(self, ids):
    """Fetch series metadata + episode list for one video id.

    Episodes become "name$url" pairs joined by '#', the format the host
    app expects for vod_play_url.
    """
    tid = ids[0]
    url = f'{self.host}/v1/ys_video_series/by_vid/{tid}'
    data = requests.get(url, headers=self.headers).json()
    data1 = data['data']['ys_video_site']
    urls = []
    for it in data['data']['data']:
        urls.append(it['series_num'] + '$' + it['video_url'])
    vod = {
        'vod_name': data1['video_name'],
        'type_name': data1['tag'],
        'vod_year': data1['years'],
        'vod_area': data1['area'],
        'vod_director': data1['main_actor'],
        'vod_content': data1['video_desc'],
        'vod_play_from': '嗷呜在线',
        'vod_play_url': '#'.join(urls),
    }
    result = {
        'list': [
            vod
        ]
    }
    return result
|
||||
|
||||
def searchContent(self, key, quick, pg=1):
    """Search the API; an exact 'first_video_series' hit is prepended when present."""
    url = f'{self.host}/v1/ys_video_sites/search?s={key}&o=0&ps=200&pn={pg}'
    data = requests.get(url, headers=self.headers).json()
    videos = data['data']['video_sites']
    if data['data']['first_video_series'] is not None:
        videos = [data['data']['first_video_series']] + videos
    result = {}
    result['list'] = self.list(videos)
    result['page'] = pg
    return result
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
|
||||
result = {
|
||||
'url': id,
|
||||
'parse': 0,
|
||||
'header': self.headers
|
||||
}
|
||||
return result
|
||||
|
||||
def localProxy(self, param):
    """Local-proxy hook required by the Spider interface; unused in this spider."""
    pass
|
212
PY/lavAPP.py
Normal file
212
PY/lavAPP.py
Normal file
@ -0,0 +1,212 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import sys
|
||||
from base64 import b64encode, b64decode
|
||||
from Crypto.Hash import MD5, SHA256
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
from Crypto.Cipher import AES
|
||||
import json
|
||||
import time
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def getName(self):
    """Human-readable identifier for this spider."""
    return "lav"
|
||||
|
||||
def init(self, extend=""):
    """Derive a pseudo device id: first 16 hex chars of the MD5 of the ms timestamp."""
    self.id = self.ms(str(int(time.time() * 1000)))[:16]
    pass
|
||||
|
||||
def isVideoFormat(self, url):
    """Spider interface stub."""
    pass

def manualVideoCheck(self):
    """Spider interface stub."""
    pass

def action(self, action):
    """Spider interface stub."""
    pass

def destroy(self):
    """Spider interface stub; no resources to release."""
    pass
|
||||
|
||||
# API host, a module-load timestamp, and the okhttp-style headers the API expects.
host = "http://sir_new.tiansexyl.tv"
t = str(int(time.time() * 1000))
headers = {'User-Agent': 'okhttp-okgo/jeasonlzy', 'Connection': 'Keep-Alive',
           'Content-Type': 'application/x-www-form-urlencoded'}
|
||||
|
||||
def homeContent(self, filter):
    """Build the category list: two fixed entries plus the server's avTag list.

    Also captures `data1['r']` (the stream referer) on self.r for later use
    by playerContent.
    """
    cateManual = {"演员": "actor", "分类": "avsearch", }
    classes = []
    for k in cateManual:
        classes.append({'type_name': k, 'type_id': cateManual[k]})
    j = {'code': 'homePage', 'mod': 'down', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv',
         'app_type': 'rn', 'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn',
         'oauth_id': self.id}

    # Requests are AES-encrypted and signed; responses are decrypted the same way.
    body = self.aes(j)
    data = self.post(f'{self.host}/api.php?t={str(int(time.time() * 1000))}', data=body, headers=self.headers).json()['data']
    data1 = self.aes(data, False)['data']
    self.r = data1['r']
    for i, d in enumerate(data1['avTag']):
        # if i == 4:
        #     break
        classes.append({'type_name': d['name'], 'type_id': d['tag']})
    resutl = {}
    resutl["class"] = classes
    return resutl
|
||||
|
||||
def homeVideoContent(self):
    """Spider interface stub; this source provides no home feed."""
    pass
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
    """List a category page.

    `tid` is "kind[@@arg[@@arg2]]": 'avsearch' (tag folders), 'actor'
    (actor folders), or a plain tag id. Each kind maps to a different
    encrypted API request body `j`.
    """
    id = tid.split("@@")
    result = {}
    result["page"] = pg
    result["pagecount"] = 9999
    result["limit"] = 90
    result["total"] = 999999
    if id[0] == 'avsearch':
        # NOTE(review): when id has no '@@' part and pg != '1', `j` is never
        # assigned and self.aes(j) below would raise — confirm the host app
        # only requests page 1 for the folder view.
        if pg == '1':
            j = {'code': 'avsearch', 'mod': 'search', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv',
                 'app_type': 'rn', 'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn',
                 'oauth_id': self.id}
        if len(id) > 1:
            j = {'code': 'find', 'mod': 'tag', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv',
                 'app_type': 'rn', 'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn',
                 'oauth_id': self.id, 'type': 'av', 'dis': 'new', 'page': str(pg), 'tag': id[1]}
    elif id[0] == 'actor':
        j = {'mod': 'actor', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv', 'app_type': 'rn',
             'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn', 'oauth_id': self.id,
             'page': str(pg), 'filter': ''}
        if len(id) > 1:
            j = {'code': 'eq', 'mod': 'actor', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv',
                 'app_type': 'rn', 'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn',
                 'oauth_id': self.id, 'page': str(pg), 'id': id[1], 'actor': id[2]}
    else:
        j = {'code': 'search', 'mod': 'av', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv',
             'app_type': 'rn', 'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn',
             'oauth_id': self.id, 'page': str(pg), 'tag': id[0]}

    body = self.aes(j)
    data = self.post(f'{self.host}/api.php?t={str(int(time.time() * 1000))}', data=body, headers=self.headers).json()['data']
    data1 = self.aes(data, False)['data']
    videos = []
    if tid == 'avsearch' and len(id) == 1:
        # Top-level tag folders.
        for item in data1:
            videos.append({"vod_id": id[0] + "@@" + str(item.get('tags')), 'vod_name': item.get('name'),
                           'vod_pic': self.imgs(item.get('ico')), 'vod_tag': 'folder',
                           'style': {"type": "rect", "ratio": 1.33}})
    elif tid == 'actor' and len(id) == 1:
        # Top-level actor folders.
        for item in data1:
            videos.append({"vod_id": id[0] + "@@" + str(item.get('id')) + "@@" + item.get('name'),
                           'vod_name': item.get('name'), 'vod_pic': self.imgs(item.get('cover')),
                           'vod_tag': 'folder', 'style': {"type": "oval"}})
    else:
        # Leaf video listings.
        for item in data1:
            if item.get('_id'):
                videos.append({"vod_id": str(item.get('id')), 'vod_name': item.get('title'),
                               'vod_pic': self.imgs(item.get('cover_thumb') or item.get('cover_full')),
                               'vod_remarks': item.get('good'), 'style': {"type": "rect", "ratio": 1.33}})
    result["list"] = videos
    return result
|
||||
|
||||
def detailContent(self, ids):
    """Fetch the play lines for a video and keep only the first one.

    The s720 URL's scheme+host segment is rewritten to 'https://m3u8'
    before use.
    """
    id = ids[0]
    j = {'code': 'detail', 'mod': 'av', 'channel': 'self', 'via': 'agent', 'bundleId': 'com.tvlutv',
         'app_type': 'rn', 'os_version': '12.0.5', 'version': '3.2.3', 'oauth_type': 'android_rn',
         'oauth_id': self.id, 'id': id}
    body = self.aes(j)
    data = self.post(f'{self.host}/api.php?t={str(int(time.time() * 1000))}', data=body, headers=self.headers).json()['data']
    data1 = self.aes(data, False)['line']
    vod = {}
    play = []
    for itt in data1:
        a = itt['line'].get('s720')
        if a:
            # Replace everything before the first '.' with the m3u8 host.
            b = a.split('.')
            b[0] = 'https://m3u8'
            a = '.'.join(b)
        # NOTE(review): reconstructed indentation — only the first line is
        # appended (break follows); if s720 is missing, `a` is None here and
        # the concatenation would raise. Confirm against upstream.
        play.append(itt['info']['tips'] + "$" + a)
        break
    vod["vod_play_from"] = 'LAV'
    vod["vod_play_url"] = "#".join(play)
    result = {"list": [vod]}
    return result
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
    """Spider interface stub; this source does not support search."""
    pass
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
    """Route playback through localProxy, which decrypts the m3u8 on the fly.

    NOTE(review): self.r is only assigned in homeContent — if playback is
    requested before the home screen loads, this raises AttributeError;
    confirm the host app always calls homeContent first.
    """
    url = self.getProxyUrl() + "&url=" + b64encode(id.encode('utf-8')).decode('utf-8') + "&type=m3u8"
    # Headers reused by localProxy's fetch of the encrypted playlist.
    self.hh = {'User-Agent': 'dd', 'Connection': 'Keep-Alive', 'Referer': self.r}
    result = {}
    result["parse"] = 0
    result["url"] = url
    result["header"] = self.hh
    return result
|
||||
|
||||
def localProxy(self, param):
    """Dispatch proxied requests: encrypted m3u8 playlists vs encrypted images."""
    url = param["url"]
    if param.get('type') == "m3u8":
        # The m3u8 URL was base64-wrapped by playerContent.
        return self.vod(b64decode(url).decode('utf-8'))
    else:
        return self.img(url)
|
||||
|
||||
def vod(self, url):
    """Fetch and decrypt an AES-CFB encrypted m3u8 playlist.

    Payload layout: the first 32 hex chars are the IV, the remainder is the
    hex-encoded ciphertext. Returns a (status, content-type, body) triple
    for the local proxy.
    """
    data = self.fetch(url, headers=self.hh).text
    key = bytes.fromhex("13d47399bda541b85e55830528d4e66f1791585b2d2216f23215c4c63ebace31")
    iv = bytes.fromhex(data[:32])
    data = data[32:]
    cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
    data_bytes = bytes.fromhex(data)
    decrypted = cipher.decrypt(data_bytes)
    # The plaintext is padded with backspace (0x08) bytes; strip them.
    encoded = decrypted.decode("utf-8").replace("\x08", "")
    # Fixed typo: the registered HLS MIME type is application/vnd.apple.mpegurl.
    return [200, "application/vnd.apple.mpegurl", encoded]
|
||||
|
||||
def imgs(self, url):
    """Wrap an encrypted image URL in the local-proxy endpoint."""
    return f"{self.getProxyUrl()}&url={url}"
|
||||
|
||||
def img(self, url):
    """Fetch and decrypt an AES-CFB encrypted image.

    Same payload layout as vod(): 32 hex chars of IV followed by hex
    ciphertext. The content type is guessed from the URL extension.
    """
    type = url.split('.')[-1]
    data = self.fetch(url).text
    key = bytes.fromhex("ba78f184208d775e1553550f2037f4af22cdcf1d263a65b4d5c74536f084a4b2")
    iv = bytes.fromhex(data[:32])
    data = data[32:]
    cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
    data_bytes = bytes.fromhex(data)
    decrypted = cipher.decrypt(data_bytes)
    return [200, f"image/{type}", decrypted]
|
||||
|
||||
def ms(self, data, m=False):
|
||||
h = MD5.new()
|
||||
if m:
|
||||
h = SHA256.new()
|
||||
h.update(data.encode('utf-8'))
|
||||
return h.hexdigest()
|
||||
|
||||
def aes(self, data, operation=True):
    """Encrypt a request body (operation=True) or decrypt a response (False).

    Encrypt: JSON-serialize `data`, AES-CFB it with an IV derived from the
    MD5 of the current unix time, then build the signed form body
    "timestamp=...&data=<iv+cipherhex>&sign=<md5(sha256(...))>".
    Decrypt: the first 32 hex chars are the IV, the rest is hex ciphertext;
    returns the parsed JSON.
    """
    key = bytes.fromhex("620f15cfdb5c79c34b3940537b21eda072e22f5d7151456dec3932d7a2b22c53")
    t = str(int(time.time()))
    ivt = self.ms(t)
    if operation:
        data = json.dumps(data, separators=(',', ':'))
        iv = bytes.fromhex(ivt)
    else:
        iv = bytes.fromhex(data[:32])
        data = data[32:]
    cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
    if operation:
        data_bytes = data.encode('utf-8')
        encrypted = cipher.encrypt(data_bytes)
        # IV (as hex) is prepended to the hex ciphertext.
        ep = f'{ivt}{encrypted.hex()}'
        # Sign over the body plus the server's shared salt constant.
        edata = f"data={ep}&timestamp={t}0d27dfacef1338483561a46b246bf36d"
        sign = self.ms(self.ms(edata, True))
        edata = f"timestamp={t}&data={ep}&sign={sign}"
        return edata
    else:
        data_bytes = bytes.fromhex(data)
        decrypted = cipher.decrypt(data_bytes)
        return json.loads(decrypted.decode('utf-8'))
|
||||
|
175
PY/三号动漫APP.py
Normal file
175
PY/三号动漫APP.py
Normal file
@ -0,0 +1,175 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import json
|
||||
import sys
|
||||
from base64 import b64decode, b64encode
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Util.Padding import unpad, pad
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
    """Resolve the API host and fetch the parse key + player endpoint list."""
    self.host = self.gethost()
    self.hkey,self.playerinfos=self.getinfo()
    pass
|
||||
|
||||
def getName(self):
    """Spider interface stub; the display name is not used."""
    pass

def isVideoFormat(self, url):
    """Spider interface stub."""
    pass

def manualVideoCheck(self):
    """Spider interface stub."""
    pass

def destroy(self):
    """Spider interface stub; no resources to release."""
    pass
|
||||
|
||||
# App-mimicking headers and the two static AES-ECB keys:
# keys[0] decrypts the host list, keys[1] decrypts API responses.
headers = {
    'User-Agent': 'Dalvik/1.4.0 (Linux; U; Android 11; Redmi Build/M2012K10C)',
    'version': '1.4.0'
}

keys=['rectangleadsadxa','aassddwwxxllsx1x']
|
||||
|
||||
def homeContent(self, filter):
    """Build the category list and fetch per-category filters concurrently."""
    cdata=self.getdata('/api.php/v1.home/types')
    result = {}
    classes = []
    filters = {}
    # Skip the first entry (the aggregate/home tab).
    for i in cdata['data']['types'][1:]:
        classes.append({
            'type_id': i['type_id'],
            'type_name': i['type_name']
        })
    # One filter request per category, fetched in parallel.
    with ThreadPoolExecutor(max_workers=len(classes)) as executor:
        futures = [executor.submit(self.getf, i['type_id'])
                   for i in classes]
        for future in futures:
            try:
                type_id, filter_data = future.result()
                if len(filter_data):filters[type_id] = filter_data
            except Exception as e:
                print(f'处理筛选数据失败: {e}')
    result['class'] = classes
    result['filters'] = filters
    return result
|
||||
|
||||
def homeVideoContent(self):
    """Use the type-26 home banners as the home-screen video list."""
    data=self.getdata('/api.php/v1.home/data?type_id=26')
    return {'list':data['data']['banners']}
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
    """List a category page; unset filters fall back to the API's '全部/最新' defaults."""
    json_data = {
        'area': extend.get('area', '全部地区'),
        'lang': extend.get('lang', '全部语言'),
        'rank': extend.get('rank', '最新'),
        'type': extend.get('type', '全部类型'),
        'type_id': int(tid),
        'year': extend.get('year', '全部年代'),
    }
    data=self.getdata(f'/api.php/v1.classify/content?page={pg}',method=False,json_data=json_data)
    result = {}
    result['list'] = data['data']['video_list']
    result['page'] = pg
    result['pagecount'] = 9999
    result['limit'] = 90
    result['total'] = 999999
    return result
|
||||
|
||||
def detailContent(self, ids):
    """Fetch detail data and flatten play_url_list into the host-app format.

    Episode ids become "<player-from>@@<url>" so playerContent can pick the
    right parse endpoint later.
    """
    data=self.getdata(f'/api.php/v1.player/details?vod_id={ids[0]}')
    vod = data['data']['detail']
    plist,names = [],[]
    for i in vod['play_url_list']:
        names.append(i['show'])
        plist.append('#'.join([f"{j['name']}${i['from']}@@{j['url']}" for j in i['urls']]))
    vod.pop('play_url_list', None)
    vod.update({'vod_play_from': '$$$'.join(names), 'vod_play_url': '$$$'.join(plist)})
    return {'list':[vod]}
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
    """Search across all types (type_id=0)."""
    data=self.getdata(f'/api.php/v1.search/data?wd={key}&type_id=0&page={pg}')
    return {'list': data['data']['search_data'], 'page': pg}
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
    """Resolve a play URL via the site's parse API, falling back to webview parse.

    `id` is "<player-name>@@<raw-url>" as assembled by detailContent. On any
    failure the raw URL is returned with parse=1 (let the host app parse it).
    """
    ids = id.split('@@')
    try:
        body = {'parse': self.getparse(ids[0]), 'url': ids[-1], 'matching': ''}
        data = self.getdata(f'/shark/api.php?action=parsevod', method=False, data=body)
        url = data.get('url') or data['data'].get('url')
        if not url:
            raise ValueError("解析失败")
        p = 0
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; the fallback behavior is unchanged.
        p, url = 1, ids[-1]
    return {'parse': p, 'url': url, 'header': {'User-Agent': 'aliplayer(appv=1.4.0&av=6.16.0&av2=6.16.0_40316683&os=android&ov=11&dm=M2012K10C)'}}
|
||||
|
||||
def localProxy(self, param):
    """Local-proxy hook required by the Spider interface; unused in this spider."""
    pass
|
||||
|
||||
def getparse(self, id):
    """Return the decrypted parse endpoint for the named player, or ''."""
    match = next((info for info in self.playerinfos if info['playername'] == id), None)
    if match is None:
        return ''
    return self.aes(match['playerjiekou'], self.hkey)
|
||||
|
||||
def gethost(self):
    """Fetch the encrypted host list from OSS and return the first entry."""
    headers = {
        'User-Agent': 'okhttp/4.11.0',
        'Connection': 'Keep-Alive'
    }
    response = self.fetch('https://shopv1.oss-accelerate.aliyuncs.com/api.txt', headers=headers).text
    # Payload is AES-ECB (keys[0]) over a base64 JSON array of hosts.
    host=json.loads(self.aes(response.strip(),self.keys[0]))[0]
    return host
|
||||
|
||||
def getinfo(self):
    """Fetch the app config: the parse-key (hkey) and the player endpoint list."""
    data=self.post(f'{self.host}/shark/api.php?action=configs',headers=self.headers,data={'username':'','token':''}).text
    datas=json.loads(self.aes(data))
    # 'hulue' packs several values joined by '&'; the first is the key.
    hkey = datas['config']['hulue'].split('&')[0]
    playerinfos = datas['playerinfos']
    return hkey,playerinfos
|
||||
|
||||
def getdata(self,parh,method=True,data=None,json_data=None):
    """GET (method=True) or POST an API path and AES-decrypt the JSON response."""
    url = f'{self.host}{parh}'
    if method:
        response = self.fetch(url, headers=self.headers).text
    else:
        response = self.post(url, headers=self.headers, data=data, json=json_data).text
    return json.loads(self.aes(response))
|
||||
|
||||
def getf(self, type_id):
    """Fetch the filter options for one category; returns (type_id, filters).

    Failures degrade to an empty filter list so homeContent keeps working.
    """
    try:
        fdata = self.getdata(f'/api.php/v1.classify/types?type_id={type_id}')
        filter_list = []
        for key, value in fdata['data'].items():
            if len(value):
                filter_list.append({
                    # API keys look like 'area_list'; the prefix is the filter key.
                    'key': key.split('_')[0],
                    'name': key.split('_')[0],
                    'value': [{'n': j['type_name'], 'v': j['type_name']} for j in value if j.get('type_name')]
                })
        return type_id, filter_list
    except Exception as e:
        print(f'获取type_id={type_id}的筛选数据失败: {e}')
        return type_id, []
|
||||
|
||||
def aes(self, word,key=None, b=True):
    """AES-ECB decrypt (b=True, base64 input) or encrypt (b=False) a string.

    Defaults to keys[1]. Input is round-tripped through utf-8-sig/ascii to
    strip a possible BOM and any non-ASCII garbage before base64 handling.
    """
    if not key:key=self.keys[1]
    cipher = AES.new(key.encode(), AES.MODE_ECB)
    word = word.encode('utf-8-sig').decode('ascii', errors='ignore')
    if b:
        word = b64decode(word)
        decrypted = cipher.decrypt(word)
        word = unpad(decrypted, AES.block_size).decode()
    else:
        padded = pad(word.encode(), AES.block_size)
        encrypted = cipher.encrypt(padded)
        word = b64encode(encrypted).decode()
    return word
|
||||
|
301
PY/优.py
Normal file
301
PY/优.py
Normal file
@ -0,0 +1,301 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import json
|
||||
import sys
|
||||
import time
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from urllib.parse import quote
|
||||
from Crypto.Hash import MD5
|
||||
import requests
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
    """Set up a session with the desktop-client headers/cookies and fetch a csrf token."""
    self.session = requests.Session()
    self.session.headers.update(self.headers)
    self.session.cookies.update(self.cookie)
    # get_ctoken is defined elsewhere in this file; presumably primes the
    # mtop token cookie needed by the h5 API — TODO confirm.
    self.get_ctoken()
    pass
|
||||
|
||||
def getName(self):
    """Spider interface stub; the display name is not used."""
    pass

def isVideoFormat(self, url):
    """Spider interface stub."""
    pass

def manualVideoCheck(self):
    """Spider interface stub."""
    pass

def destroy(self):
    """Spider interface stub; no resources to release."""
    pass
|
||||
|
||||
# Site endpoints: main site, search host, mtop h5 API gateway, video pages.
host='https://www.youku.com'

shost='https://search.youku.com'

h5host='https://acs.youku.com'

ihost='https://v.youku.com'

# Desktop-client User-Agent and a captured cookie jar the mtop API accepts.
headers = {
    'User-Agent': 'Mozilla/5.0 (; Windows 10.0.26100.3194_64 ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Electron/14.2.0 Safari/537.36 Node/14.17.0 YoukuDesktop/9.2.60 UOSYouku (2.0.1)-Electron(UTDID ZYmGMAAAACkDAMU8hbiMmYdd;CHANNEL official;ZREAL 0;BTYPE TM2013;BRAND TIMI;BUILDVER 9.2.60.1001)',
    'Referer': f'{host}/'
}

cookie={
    "__ysuid": "17416134165380iB",
    "__aysid": "1741613416541WbD",
    "xlly_s": "1",
    "isI18n": "false",
    "cna": "bNdVIKmmsHgCAXW9W6yrQ1/s",
    "__ayft": "1741672162330",
    "__arpvid": "1741672162331FBKgrn-1741672162342",
    "__ayscnt": "1",
    "__aypstp": "1",
    "__ayspstp": "3",
    "tfstk": "gZbiib4JpG-6DqW-B98_2rwPuFrd1fTXQt3vHEp4YpJIBA3OgrWcwOi90RTOo9XVQ5tAM5NcK_CP6Ep97K2ce1XDc59v3KXAgGFLyzC11ET2n8U8yoyib67M3xL25e8gS8pbyzC1_ET4e8URWTsSnHv2uh8VTeJBgEuN3d-ELQAWuKWV36PHGpJ2uEWVTxvicLX1ewyUXYSekxMf-CxMEqpnoqVvshvP_pABOwvXjL5wKqeulm52np_zpkfCDGW9Ot4uKFIRwZtP7vP9_gfAr3KEpDWXSIfWRay-DHIc_Z-hAzkD1i5Ooi5LZ0O5YO_1mUc476YMI3R6xzucUnRlNe_zemKdm172xMwr2L7CTgIkbvndhFAVh3_YFV9Ng__52U4SQKIdZZjc4diE4EUxlFrfKmiXbBOHeP72v7sAahuTtWm78hRB1yV3tmg9bBOEhWVnq5KwOBL5."
}
|
||||
|
||||
def homeContent(self, filter):
    """Build the fixed channel list and fetch each channel's filters in parallel.

    Also caches the per-channel session token (self.typeid) that
    categoryContent needs for paging.
    """
    result = {}
    categories = ["电视剧", "电影", "综艺", "动漫", "少儿", "纪录片", "文化", "亲子", "教育", "搞笑", "生活",
                  "体育", "音乐", "游戏"]
    classes = [{'type_name': category, 'type_id': category} for category in categories]
    filters = {}
    self.typeid = {}
    with ThreadPoolExecutor(max_workers=len(categories)) as executor:
        # self.cf (defined elsewhere) returns (session token, filter spec)
        # for one channel.
        tasks = {
            executor.submit(self.cf, {'type': category}, True): category
            for category in categories
        }

        for future in as_completed(tasks):
            try:
                category = tasks[future]
                session, ft = future.result()
                filters[category] = ft
                self.typeid[category] = session
            except Exception as e:
                print(f"处理分类 {tasks[future]} 时出错: {str(e)}")

    result['class'] = classes
    result['filters'] = filters
    return result
|
||||
|
||||
def homeVideoContent(self):
    """Fetch the desktop-web home feed via the mtop gateway and flatten it
    into a vod list.  Returns {'list': []} on any failure.
    """
    try:
        vlist = []
        # Fixed web-home query payload; utdid/guid are the baked-in device ids.
        params={"ms_codes":"2019061000","params":"{\"debug\":0,\"gray\":0,\"pageNo\":1,\"utdid\":\"ZYmGMAAAACkDAMU8hbiMmYdd\",\"userId\":\"\",\"bizKey\":\"YOUKU_WEB\",\"appPackageKey\":\"com.youku.YouKu\",\"showNodeList\":0,\"reqSubNode\":0,\"nodeKey\":\"WEBHOME\",\"bizContext\":\"{\\\"spmA\\\":\\\"a2hja\\\"}\"}","system_info":"{\"device\":\"pcweb\",\"os\":\"pcweb\",\"ver\":\"1.0.0.0\",\"userAgent\":\"Mozilla/5.0 (; Windows 10.0.26100.3194_64 ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Electron/14.2.0 Safari/537.36 Node/14.17.0 YoukuDesktop/9.2.60 UOSYouku (2.0.1)-Electron(UTDID ZYmGMAAAACkDAMU8hbiMmYdd;CHANNEL official;ZREAL 0;BTYPE TM2013;BRAND TIMI;BUILDVER 9.2.60.1001)\",\"guid\":\"1590141704165YXe\",\"appPackageKey\":\"com.youku.pcweb\",\"young\":0,\"brand\":\"\",\"network\":\"\",\"ouid\":\"\",\"idfa\":\"\",\"scale\":\"\",\"operator\":\"\",\"resolution\":\"\",\"pid\":\"\",\"childGender\":0,\"zx\":0}"}
        data=self.getdata(f'{self.h5host}/h5/mtop.youku.columbus.home.query/1.0/',params)
        # The response nests the payload under a single dynamic key.
        okey=list(data['data'].keys())[0]
        for i in data['data'][okey]['data']['nodes'][0]['nodes'][-1]['nodes'][0]['nodes']:
            # Some cards wrap the real payload one level deeper — unwrap first.
            if i.get('nodes') and i['nodes'][0].get('data'):
                i=i['nodes'][0]['data']
            if i.get('assignId'):
                vlist.append({
                    'vod_id': i['assignId'],
                    'vod_name': i.get('title'),
                    'vod_pic': i.get('vImg') or i.get('img'),
                    'vod_year': i.get('mark',{}).get('data',{}).get('text'),
                    'vod_remarks': i.get('summary')
                })
        return {'list': vlist}
    except Exception as e:
        print(f"处理主页视频数据时出错: {str(e)}")
        return {'list': []}
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
    """List one category page.

    tid is the category name (also the key into self.typeid, the per-category
    paging session created by homeContent/cf).  On page 1 a fresh session is
    requested so filter changes take effect.  When parsing fails (e.g. the
    session expired and the payload shape changed) pagecount collapses to the
    current page so the UI stops paging.
    """
    result = {}
    vlist = []
    result['page'] = pg
    result['limit'] = 90
    result['total'] = 999999
    pagecount = 9999
    params = {'type': tid}
    session_token = self.typeid[tid]  # avoid shadowing builtin `id`
    params.update(extend)
    if pg == '1':
        # New filter combination -> ask the site for a fresh paging session.
        session_token = self.cf(params)
    # NOTE: restored '&params=' (the source dump had it mangled to '¶ms=').
    data = self.session.get(
        f'{self.host}/category/data?session={session_token}&params={quote(json.dumps(params))}&pageNo={pg}'
    ).json()
    try:
        data = data['data']['filterData']
        for i in data['listData']:
            # Only cards whose link carries an 's=' show id are playable here.
            if i.get('videoLink') and 's=' in i['videoLink']:
                vlist.append({
                    'vod_id': i.get('videoLink').split('s=')[-1],
                    'vod_name': i.get('title'),
                    'vod_pic': i.get('img'),
                    'vod_year': i.get('rightTagText'),
                    'vod_remarks': i.get('summary')
                })
        # Persist the follow-up session for the next page request.
        self.typeid[tid] = quote(json.dumps(data['session']))
    except Exception:
        # Malformed/expired payload: stop paging at the current page.
        pagecount = pg
    result['list'] = vlist
    result['pagecount'] = pagecount
    return result
|
||||
|
||||
def detailContent(self, ids):
    """Fetch show metadata, then the full episode list.

    The gateway returns one batch of episode nodes per request; when the show
    has more episodes than one batch holds, the remaining ranges are fetched
    concurrently (each with an adjusted itemStart/EndStage session) and
    appended in order.  On any failure a placeholder vod is returned.
    """
    try:
        data=self.session.get(f'{self.ihost}/v_getvideo_info/?showId={ids[0]}').json()
        v=data['data']
        vod = {
            'type_name': v.get('showVideotype'),
            'vod_year': v.get('lastUpdate'),
            'vod_remarks': v.get('rc_title'),
            'vod_actor': v.get('_personNameStr'),
            'vod_content': v.get('showdesc'),
            'vod_play_from': '优酷',
            'vod_play_url': ''
        }
        params={"biz":"new_detail_web2","videoId":v.get('vid'),"scene":"web_page","componentVersion":"3","ip":data.get('ip'),"debug":0,"utdid":"ZYmGMAAAACkDAMU8hbiMmYdd","userId":0,"platform":"pc","nextSession":"","gray":0,"source":"pcNoPrev","showId":ids[0]}
        # sdata: first batch of episode nodes; index: total episode count.
        sdata,index=self.getinfo(params)
        pdata=sdata['nodes']
        if index > len(pdata):
            # More episodes than the first batch — page the rest in parallel.
            batch_size = len(pdata)
            total_batches = ((index + batch_size - 1) // batch_size) - 1
            ssj = json.loads(sdata['data']['session'])
            with ThreadPoolExecutor(max_workers=total_batches) as executor:
                futures = []
                for batch in range(total_batches):
                    # 1-based episode range for this batch.
                    start = batch_size + 1 + (batch * batch_size)
                    end = start + batch_size - 1
                    next_session = ssj.copy()
                    next_session.update({
                        "itemStartStage": start,
                        "itemEndStage": min(end, index)
                    })
                    current_params = params.copy()
                    current_params['nextSession'] = json.dumps(next_session)
                    futures.append((start, executor.submit(self.getvinfo, current_params)))
                # Keep episode order deterministic regardless of completion order.
                futures.sort(key=lambda x: x[0])
                for _, future in futures:
                    try:
                        result = future.result()
                        pdata.extend(result['nodes'])
                    except Exception as e:
                        print(f"Error fetching data: {str(e)}")
        vod['vod_play_url'] = '#'.join([f"{i['data'].get('title')}${i['data']['action'].get('value')}" for i in pdata])
        return {'list': [vod]}
    except Exception as e:
        print(e)
        return {'list': [{'vod_play_from': '哎呀翻车啦', 'vod_play_url': f'呜呜呜${self.host}'}]}
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
    """Search the site and map result cards to vod entries."""
    payload = self.session.get(f'{self.shost}/api/search?pg={pg}&keyword={key}').json()
    results = []
    for component in payload['pageComponentList']:
        card = component.get('commonData')
        if not card:
            continue
        show_id = card.get('showId') or card.get('realShowId')
        if not show_id:
            continue
        results.append({
            'vod_id': show_id,
            'vod_name': card['titleDTO'].get('displayName'),
            'vod_pic': card['posterDTO'].get('vThumbUrl'),
            'vod_year': card.get('feature'),
            'vod_remarks': card.get('updateNotice'),
        })
    return {'list': results, 'page': pg}
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
    """Hand the episode off to the external resolver (jx/parse) instead of
    resolving the stream locally."""
    play_page = f"{self.ihost}/video?vid={id}"
    return {'jx': 1, 'parse': 1, 'url': play_page, 'header': ''}
|
||||
|
||||
def localProxy(self, param):
    """Local proxy hook — not used by this spider."""
    return None
|
||||
|
||||
def cf(self, params, b=False):
    """Open a fresh category paging session for *params*.

    Returns the URL-quoted session token; with b=True also returns the
    parsed filter schema (skipping the first, non-filter, group).
    """
    query = quote(json.dumps(params))
    payload = self.session.get(
        f'{self.host}/category/data?params={query}&optionRefresh=1&pageNo=1'
    ).json()
    filter_data = payload['data']['filterData']
    token = quote(json.dumps(filter_data['session']))
    if not b:
        return token
    return token, self.get_filter_data(filter_data['filter']['filterData'][1:])
|
||||
|
||||
def process_key(self, key):
    """Convert a snake_case filter key to camelCase.

    Keys without an underscore pass through unchanged; empty segments
    (double underscores) are dropped.
    """
    if '_' not in key:
        return key
    head, *tail = key.split('_')
    return head + ''.join(seg[0].upper() + seg[1:] for seg in tail if seg)
|
||||
|
||||
def get_filter_data(self, data):
    """Convert the site's raw filter groups into the app's filter schema.

    Each group becomes {'key', 'name', 'value': [{'n','v'}, ...]}; groups
    without a filterType or without any valued options are skipped.  Any
    malformed group aborts the scan and the groups parsed so far are kept.
    """
    groups = []
    try:
        for raw in data:
            subs = raw.get('subFilter')
            if not subs:
                continue
            head = subs[0]
            if not head.get('filterType'):
                continue
            # Access key/name up front so malformed heads fail the same way.
            group_key = self.process_key(head['filterType'])
            group_name = head['title']
            options = [
                {'n': sub['title'], 'v': sub['value']}
                for sub in subs
                if 'value' in sub
            ]
            if options:
                groups.append({'key': group_key, 'name': group_name, 'value': options})
    except Exception as e:
        print(f"处理筛选数据时出错: {str(e)}")
    return groups
|
||||
|
||||
def get_ctoken(self):
    """Prime the mtop token cookie (_m_h5_tk) by hitting an mtop endpoint;
    the response body itself is not used."""
    self.session.get(
        f'{self.h5host}/h5/mtop.ykrec.recommendservice.recommend/1.0/?jsv=2.6.1&appKey=24679788'
    )
|
||||
|
||||
def md5(self, t, text):
    """mtop request signature: md5("token&timestamp&appKey&payload").

    The token is the first segment of the _m_h5_tk session cookie.
    """
    token = self.session.cookies.get('_m_h5_tk').split('_')[0]
    signed = f"{token}&{t}&24679788&{text}"
    digest = MD5.new()
    digest.update(signed.encode('utf-8'))
    return digest.hexdigest()
|
||||
|
||||
def getdata(self, url, params, recursion_count=0, max_recursion=3):
    """Signed GET against an mtop endpoint.

    On a "令牌过期" (token expired) response the token cookie is refreshed
    via get_ctoken() and the call retries, up to max_recursion times.
    """
    payload = json.dumps(params)
    ts = int(time.time() * 1000)
    signed_query = {
        'appKey': '24679788',
        't': ts,
        'sign': self.md5(ts, payload),
        'data': payload,
    }
    response = self.session.get(url, params=signed_query)
    if '令牌过期' not in response.text:
        return response.json()
    if recursion_count >= max_recursion:
        raise Exception("达到最大递归次数,无法继续请求")
    self.get_ctoken()
    return self.getdata(url, params, recursion_count + 1, max_recursion)
|
||||
|
||||
def getvinfo(self, params):
    """Query the mtop gateway for detail/episode nodes and unwrap the
    single dynamically-keyed payload."""
    body = {
        "ms_codes": "2019030100",
        "params": json.dumps(params),
        "system_info": "{\"os\":\"iku\",\"device\":\"iku\",\"ver\":\"9.2.9\",\"appPackageKey\":\"com.youku.iku\",\"appPackageId\":\"pcweb\"}"
    }
    reply = self.getdata(f'{self.h5host}/h5/mtop.youku.columbus.gateway.new.execute/1.0/', body)
    wrapper_key = list(reply['data'].keys())[0]
    return reply['data'][wrapper_key]['data']
|
||||
|
||||
def getinfo(self, params):
    """Fetch a show's detail payload.

    Returns (episode-node, total-episode-count).  Movies and games keep
    their episode list one node later than series.
    """
    payload = self.getvinfo(params)
    total = payload['data']['extra']['episodeTotal']
    node_index = 4 if payload['data']['extra']['showCategory'] in ['电影', '游戏'] else 3
    return payload['nodes'][0]['nodes'][node_index], total
|
||||
|
222
PY/光速APP.py
Normal file
222
PY/光速APP.py
Normal file
@ -0,0 +1,222 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import re
|
||||
import sys
|
||||
from Crypto.Hash import MD5
|
||||
sys.path.append('..')
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Util.Padding import pad, unpad
|
||||
from urllib.parse import quote, urlparse
|
||||
from base64 import b64encode, b64decode
|
||||
import json
|
||||
import time
|
||||
from base.spider import Spider
|
||||
|
||||
class Spider(Spider):
    """'光速APP' spider.

    Talks to an AES-CBC-encrypted app API (every response's 'data' field is
    an encrypted blob), optionally routes playback through parse servers,
    and rewrites picture-disguised m3u8 playlists via a local proxy.
    """

    def init(self, extend=""):
        # Resolve the current API host from the remote bootstrap file.
        self.host = self.gethost()
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def action(self, action):
        pass

    def destroy(self):
        pass

    def homeContent(self, filter):
        """Build classes, per-type filters and the home list from initV119."""
        data = self.getdata("/api.php/getappapi.index/initV119")
        # Filter keys we understand, mapped to their display names.
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        filters = {}
        classes = []
        json_data = data["type_list"]
        # First banners are skipped; per-type recommend lists are appended below.
        homedata = data["banner_list"][8:]
        for item in json_data:
            if item["type_name"] == "全部":
                continue
            has_non_empty_field = False
            jsontype_extend = json.loads(item["type_extend"])
            homedata.extend(item["recommend_list"])
            # Inject a synthetic sort filter for every type.
            jsontype_extend["sort"] = "最新,最热,最赞"
            classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
            for key in dy:
                if key in jsontype_extend and jsontype_extend[key].strip() != "":
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["type_id"])] = []
                for dkey in jsontype_extend:
                    if dkey in dy and jsontype_extend[dkey].strip() != "":
                        values = jsontype_extend[dkey].split(",")
                        value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                       value.strip() != ""]
                        filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
        result = {}
        result["class"] = classes
        result["filters"] = filters
        result["list"] = homedata[1:]
        return result

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        """One page of a type listing; unknown filters default to '全部'."""
        body = {"area": extend.get('area', '全部'), "year": extend.get('year', '全部'), "type_id": tid, "page": pg,
                "sort": extend.get('sort', '最新'), "lang": extend.get('lang', '全部'),
                "class": extend.get('class', '全部')}
        result = {}
        data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
        result["list"] = data["recommend_list"]
        result["page"] = pg
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Detail page: every play source/episode is serialized (with its
        player's user_agent/parse info) into a base64 blob used as the id."""
        body = f"vod_id={ids[0]}"
        data = self.getdata("/api.php/getappapi.index/vodDetail", body)
        vod = data["vod"]
        play = []
        names = []
        for itt in data["vod_play_list"]:
            a = []
            names.append(itt["player_info"]["show"])
            for it in itt['urls']:
                # Carry player metadata into each episode blob for playerContent.
                it['user_agent']=itt["player_info"].get("user_agent")
                it["parse"]=itt["player_info"].get("parse")
                a.append(f"{it['name']}${self.e64(json.dumps(it))}")
            play.append("#".join(a))
        vod["vod_play_from"] = "$$$".join(names)
        vod["vod_play_url"] = "$$$".join(play)
        result = {"list": [vod]}
        return result

    def searchContent(self, key, quick, pg="1"):
        """Keyword search across all types."""
        body = f"keywords={key}&type_id=0&page={pg}"
        data = self.getdata("/api.php/getappapi.index/searchList", body)
        result = {"list": data["search_list"], "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Resolve an episode blob (see detailContent) to a playable URL.

        Tries a '?url=' style parse API first, then the app's encrypted
        vodParse endpoint for non-direct links; falls back to webview
        parsing (parse=1) when both fail.  Image-extension URLs are routed
        through the local m3u8 proxy.
        """
        ids = json.loads(self.d64(id))
        h={"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
        url = ids['url']
        p=1
        try:
            if re.search(r'\?url=', ids['parse_api_url']):
                data=self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
                url=data.get('url') or data['data'].get('url')
            elif not re.search(r'\.m3u8|\.mp4', ids.get('url')):
                body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes('encrypt', ids['url']))}&token={ids.get('token')}"
                b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
                url = json.loads(b)['url']
            p=0
        except Exception as e:
            print('错误信息:',e)
            pass
        if re.search(r'\.jpg|\.png|\.jpeg', url):
            url = self.Mproxy(url)
        result = {}
        result["parse"] = p
        result["url"] = url
        result["header"] = h
        return result

    def localProxy(self, param):
        return self.Mlocal(param)

    def gethost(self):
        """Fetch the current API host from the remote bootstrap file."""
        headers = {
            'User-Agent': 'okhttp/3.14.9'
        }
        host = self.fetch('https://jingyu-1312635929.cos.ap-nanjing.myqcloud.com/1.json',
                          headers=headers).text.strip()
        return host

    # Default Android playback headers (currently unreferenced in this class).
    phend = {
        'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)',
        'allowCrossProtocolRedirects': 'true'
    }

    def aes(self, operation, text):
        """AES-128-CBC with a fixed key used as its own IV; base64 transport."""
        key = "4d83b87c4c5ea111".encode("utf-8")
        iv = key
        if operation == "encrypt":
            cipher = AES.new(key, AES.MODE_CBC, iv)
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        elif operation == "decrypt":
            cipher = AES.new(key, AES.MODE_CBC, iv)
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            return pt.decode("utf-8")

    def header(self):
        """Per-request API headers: timestamp, md5 device id and AES-signed
        verification of the same timestamp."""
        t = str(int(time.time()))
        header = {"Referer":self.host,
                  "User-Agent": "okhttp/3.14.9", "app-version-code": "300", "app-ui-mode": "light",
                  "app-api-verify-time": t, "app-user-device-id": self.md5(t),
                  "app-api-verify-sign": self.aes("encrypt", t),
                  "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
        return header

    def getdata(self, path, data=None):
        """POST to the API and AES-decrypt the 'data' field into JSON."""
        vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
        data1 = self.aes("decrypt", vdata)
        return json.loads(data1)

    def Mproxy(self, url):
        """Wrap *url* so playback goes through the local proxy as m3u8."""
        return self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"

    def Mlocal(self, param,header=None):
        """Local proxy endpoint: fetch the (possibly redirected) playlist and
        absolutize relative segment paths against the final host."""
        url = self.d64(param["url"])
        ydata = self.fetch(url, headers=header, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            # Follow a single redirect manually so we know the real base URL.
            url = ydata.headers['Location']
            data = self.fetch(url, headers=header).content.decode('utf-8')
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        lines = data.strip().split('\n')
        for index, string in enumerate(lines):
            if '#EXT' not in string and 'http' not in string:
                last_slash_index = string.rfind('/')
                lpath = string[:last_slash_index + 1]
                # NOTE(review): lpath keeps only the directory part — the
                # segment file name after the last '/' appears to be dropped
                # here; confirm against a real playlist.
                lines[index] = durl + ('' if lpath.startswith('/') else '/') + lpath
        data = '\n'.join(lines)
        # NOTE(review): MIME type looks truncated ("mpegur" vs "mpegurl") —
        # runtime string left untouched; verify whether players care.
        return [200, "application/vnd.apple.mpegur", data]

    def e64(self, text):
        """str -> base64 str; returns '' on failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self,encoded_text):
        """base64 str -> str; returns '' on failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def md5(self, text):
        """Hex md5 digest of *text*."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()
|
314
PY/剧多短剧APP.py
Normal file
314
PY/剧多短剧APP.py
Normal file
@ -0,0 +1,314 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import base64
|
||||
import binascii
|
||||
import json
|
||||
import random
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
from base64 import b64decode, b64encode
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Hash import MD5
|
||||
from Crypto.Util.Padding import unpad, pad
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
    """'剧多短剧' short-drama spider.

    All API traffic is AES-CBC encrypted (request bodies as {'ct': ...},
    responses as a quoted encrypted string) and signed with
    md5(ct + salt).  Cover images are fetched through localProxy and
    AES-decrypted before being returned to the player.
    """

    def init(self, extend=""):
        # ut: whether we already hold a login token (adds auth headers).
        self.ut = False
        # self.did, self.ntid =self.getdid()
        # Fixed device id / tenant id; the random getdid() path is disabled.
        self.did, self.ntid = 'e59eb2465f61b9ca','65a0de19b3a2ec93fa479ad6'
        self.token, self.uid = self.gettoken()
        # Image CDN host, image path suffix, and play CDN host.
        self.phost, self.phz,self.mphost=self.getpic()
        # self.phost, self.phz,self.mphost = ('https://dbtp.tgydy.com','.log','https://dplay.nbzsmc.com')
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    # Hard-coded API origin.
    host='http://192.151.245.34:8089'

    def md5(self, text):
        """Hex md5 digest of *text*."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()

    def uuid(self):
        """Random UUID4 string (nonce material for request signing)."""
        return str(uuid.uuid4())

    def getdid(self):
        """Generate a random device id / tenant id pair (currently unused —
        init() uses fixed values instead)."""
        did = self.random_str(16)
        ntid = self.random_str(24)
        return did, ntid
        # try:
        #     if self.getCache('did'):
        #         return self.getCache('did'), self.getCache('ntid')
        #     else:
        #         self.setCache('did', did)
        #         self.setCache('ntid', ntid)
        #         return did, ntid
        # except Exception as e:
        #     self.setCache('did', did)
        #     self.setCache('ntid', ntid)
        #     return did, ntid

    def aes(self, text, bool=True):
        """AES-128-CBC with fixed key/iv.

        bool=True: encrypt *text* -> base64 string.
        bool=False: decrypt base64 *text* -> parsed JSON object.
        """
        key = b64decode('c0k4N1RfKTY1U1cjJERFRA==')
        iv = b64decode('VzIjQWRDVkdZSGFzSEdEVA==')
        if bool:
            cipher = AES.new(key, AES.MODE_CBC, iv)
            ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
            ct = b64encode(ct_bytes).decode("utf-8")
            return ct
        else:
            cipher = AES.new(key, AES.MODE_CBC, iv)
            pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
            ptt=json.loads(pt.decode("utf-8"))
            return ptt

    def random_str(self,length=24):
        """Random lowercase-hex string of *length* characters."""
        hex_chars = '0123456789abcdef'
        return ''.join(random.choice(hex_chars) for _ in range(length))

    def gettoken(self):
        """Anonymous login; returns (token, userId) and flips self.ut so
        later requests carry auth headers."""
        params={"deviceId":self.did,"deviceModel":"8848钛晶手机","devicePlatform":"1","tenantId":self.ntid}
        data=self.getdata('/supports/anonyLogin',params)
        self.ut=True
        return data['data']['token'], data['data']['userId']

    def getdata(self,path,params=None):
        """Signed, encrypted round-trip to the API.

        With params: POST {'ct': aes(params)}.  Without: plain GET.
        The signature 's' is md5(ct + salt); the response body is a quoted
        encrypted string, hence the [1:-1] strip before decrypting.
        """
        t = int(time.time()*1000)
        n=self.md5(f'{self.uuid()}{t}')
        if params:
            ct=self.aes(json.dumps(params))
        else:
            ct=f'{t}{n}'
        s=self.md5(f'{ct}8j@78m.367HGDF')
        headers = {
            'User-Agent': 'okhttp-okgo/jeasonlzy',
            'Connection': 'Keep-Alive',
            'Accept-Language': 'zh-CN,zh;q=0.8',
            'tenantId': self.ntid,
            'n': n,
            't': str(int(t/1000)),
            's': s,
        }
        if self.ut:
            headers['ta-token'] = self.token
            headers['userId'] = self.uid
        if params:
            params={'ct':ct}
            response = self.post(f'{self.host}{path}', headers=headers, json=params).text
        else:
            response = self.fetch(f'{self.host}{path}', headers=headers).text
        data=self.aes(response[1:-1],False)
        return data

    def getpic(self):
        """Read CDN configuration (image host, image path suffix, play host)
        from /supports/configs; falls back to known-good defaults."""
        try:
            at = int(time.time() * 1000)
            t=str(int(at/ 1000))
            n = self.md5(f'{self.uuid()}{at}')
            headers = {
                'Host': '192.151.245.34:8089',
                'User-Agent': 'okhttp-okgo/jeasonlzy',
                'Connection': 'Keep-Alive',
                'Accept-Language': 'zh-CN,zh;q=0.8',
                'tenantId': self.ntid,
                'userId': self.uid,
                'ta-token': self.token,
                'n': n,
                't': t,
                's': self.md5(f'{t}{n}8j@78m.367HGDF')
            }
            params = {
                'tenantId': self.ntid,
            }
            response = self.fetch(f'{self.host}/supports/configs', params=params, headers=headers).text
            data=self.aes(response[1:-1],False)
            config = {
                'image_cdn': '',
                'image_cdn_path': '',
                'cdn-domain': ''
            }
            for item in data.get('data', []):
                name = item.get('name')
                records = item.get('records', [])
                if name in config and records:
                    value = records[0].get('value', '')
                    if name == 'cdn-domain':
                        # Config packs several domains separated by '#'.
                        value = value.split('#')[0]
                    config[name] = value
            return config['image_cdn'], config['image_cdn_path'], config['cdn-domain']
        except Exception as e:
            print(f"Error in getpic: {e}")
            return 'https://dbtp.tgydy.com', '.log', 'https://dplay.nbzsmc.com'

    def getlist(self,data):
        """Map raw movie records to vod entries; id is movieId@episodeCount
        so detailContent knows how many episodes to request."""
        vod=[]
        for i in data:
            vod.append({
                'vod_id': f'{i.get("movieId")}@{i.get("entryNum")}',
                'vod_name': i.get('title'),
                'vod_pic': f'{self.getProxyUrl()}&path={i.get("thumbnail")}',
                'vod_year': i.get('score'),
                'vod_remarks': f'{i.get("entryNum")}集'
            })
        return vod

    def homeContent(self, filter):
        """Static top-level sections plus server-driven '剧场' classifies."""
        data=self.getdata('/movies/classifies')
        result = {}
        cateManual = {
            "榜单": "ranking/getTodayHotRank",
            "专辑": "getTMovieFolderPage",
            "剧场": "getClassMoviePage2",
            "演员": "follow/getRecommendActorPage",
        }
        classes = []
        for k in cateManual:
            classes.append({
                'type_name': k,
                'type_id': cateManual[k]
            })
        filters = {}
        if data.get('data'):
            filters["getClassMoviePage2"] = [
                {
                    "key": "type",
                    "name": "分类",
                    "value": [
                        {"n": item["name"], "v": item["classifyId"]}
                        for item in data["data"]
                    ]
                }
            ]
        filters["ranking/getTodayHotRank"] = [
            {
                "key": "type",
                "name": "榜单",
                "value": [
                    {"n": "播放榜", "v": "getWeekHotPlayRank"},
                    {"n": "高赞榜", "v": "getWeekStarRank"},
                    {"n": "追剧榜", "v": "getSubTMoviePage"},
                    {"n": "高分榜", "v": "ranking/getScoreRank"}
                ]
            }
        ]
        filters["follow/getRecommendActorPage"] = [
            {
                "key": "type",
                "name": "性别",
                "value": [
                    {"n": "男", "v": "0"},
                    {"n": "女", "v": "1"}
                ]
            }
        ]
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Recommended feed for the home screen."""
        params = {"pageNo":"1","pageSize":"30","platform":"1","deviceId":self.did,"tenantId":self.ntid}
        data=self.getdata('/news/getRecommendTMoviePage',params)
        vod=self.getlist(data['data']['records'])
        return {'list':vod}

    def categoryContent(self, tid, pg, filter, extend):
        """Dispatch the section id to its API path and page it.

        tid containing '@' means "works of actor <id>" (folder drill-down
        from the actor list).
        """
        params={}
        path = f'/news/{tid}'
        if tid=='getClassMoviePage2':
            parama={"pageNo":pg,"pageSize":"30","orderFlag":"0","haveActor":"-1","classifyId":extend.get('type','-1'),"tagId":""}
        elif 'rank' in tid:
            path=f'/news/{extend.get("type") or tid}'
            parama={"pageNo":pg,"pageSize":"30"}
        elif 'follow' in tid:
            parama={"pageNo":pg,"pageSize":"20"}
            if extend.get('type'):
                path=f'/news/getActorPage'
                parama={"pageNo":pg,"pageSize":"50","sex":extend.get('type')}
        elif tid=='getTMovieFolderPage':
            parama={"pageNo":pg,"pageSize":"20"}
        elif '@' in tid:
            path='/news/getActorTMoviePage'
            parama={"id":tid.split('@')[0],"pageNo":pg,"pageSize":"30"}
        # NOTE(review): these platform/device fields are set on `params`,
        # but `parama` is what gets sent — looks like they never reach the
        # request; confirm intent (other endpoints include them inline).
        params['platform'] = '1'
        params['deviceId'] = self.did
        params['tenantId'] = self.ntid
        data=self.getdata(path,parama)
        vods=[]
        if 'follow' in tid:
            # Actor cards render as round "folder" tiles.
            for i in data['data']['records']:
                vods.append({
                    'vod_id': f'{i.get("id")}@',
                    'vod_name': i.get('name'),
                    'vod_pic': i.get('avatar'),
                    'vod_tag': 'folder',
                    'vod_remarks': f'作品{i.get("movieNum")}',
                    'style': {"type": "oval"}
                })
        else:
            vdata=data['data']['records']
            if tid=='getTMovieFolderPage':
                # Albums nest their movies one level down — flatten them.
                vdata=[j for i in data['data']['records'] for j in i['movieList']]
            vods=self.getlist(vdata)
        result = {}
        result['list'] = vods
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Request all episodes in one page (the id carries the count) and
        join them into a single play list."""
        ids=ids[0].split('@')
        params = {"pageNo": "1", "pageSize": ids[1], "movieId": ids[0], "platform": "1", "deviceId": self.did, "tenantId": self.ntid}
        data = self.getdata('/news/getEntryPage', params)
        # NOTE(review): leftover debug print — consider removing.
        print(data)
        plist=[f'第{i.get("entryNum")}集${i.get("mp4PlayAddress") or i.get("playAddress")}' for i in data['data']['records']]
        vod = {
            'vod_play_from': '嗷呜爱看短剧',
            'vod_play_url': '#'.join(plist),
        }
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        """Keyword search."""
        params = {"pageNo": pg, "pageSize": "20", "keyWord": key, "orderFlag": "0", "platform": "1", "deviceId": self.did, "tenantId": self.ntid}
        data = self.getdata('/news/searchTMoviePage', params)
        vod = self.getlist(data['data']['records'])
        return {'list':vod,'page':pg}

    def playerContent(self, flag, id, vipFlags):
        """Episode paths are relative to the play CDN host; direct play."""
        return {'parse': 0, 'url': f'{self.mphost}{id}', 'header': {'User-Agent':'Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)'}}

    def localProxy(self, param):
        """Fetch an obfuscated cover image from the CDN (stored with the
        self.phz suffix, e.g. '.log') and AES-decrypt it to real image bytes."""
        type=param.get('path').split('.')[-1]
        data=self.fetch(f'{self.phost}{param.get("path")}{self.phz}',headers={'User-Agent':'Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)'})
        def decrypt(encrypted_text):
            # Fixed-key AES-CBC; returns None if the blob is not decryptable.
            try:
                key = base64.urlsafe_b64decode("iM41VipvCFtToAFFRExEXw==")
                iv = base64.urlsafe_b64decode("0AXRTXzmMSrlRSemWb4sVQ==")
                cipher = AES.new(key, AES.MODE_CBC, iv)
                decrypted_padded = cipher.decrypt(encrypted_text)
                decrypted_data = unpad(decrypted_padded, AES.block_size)
                return decrypted_data
            except (binascii.Error, ValueError):
                return None
        return [200, f'image/{type}', decrypt(data.content)]
|
||||
|
175
PY/小红薯APP.py
Normal file
175
PY/小红薯APP.py
Normal file
@ -0,0 +1,175 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import json
|
||||
import random
|
||||
import sys
|
||||
import time
|
||||
from base64 import b64decode
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Hash import MD5
|
||||
from Crypto.Util.Padding import unpad
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def getName(self):
|
||||
return "小红书"
|
||||
|
||||
def init(self, extend=""):
|
||||
self.did = self.random_str(32)
|
||||
self.token,self.phost = self.gettoken()
|
||||
pass
|
||||
|
||||
def isVideoFormat(self, url):
|
||||
pass
|
||||
|
||||
def manualVideoCheck(self):
|
||||
pass
|
||||
|
||||
def destroy(self):
|
||||
pass
|
||||
|
||||
def random_str(self,length=16):
|
||||
hex_chars = '0123456789abcdef'
|
||||
return ''.join(random.choice(hex_chars) for _ in range(length))
|
||||
|
||||
def md5(self, text: str) -> str:
|
||||
h = MD5.new()
|
||||
h.update(text.encode('utf-8'))
|
||||
return h.hexdigest()
|
||||
|
||||
def homeContent(self, filter):
|
||||
data = self.fetch(f'{self.host}/api/video/queryClassifyList?mark=4', headers=self.headers()).json()['encData']
|
||||
data1 = self.aes(data)
|
||||
result = {}
|
||||
classes = []
|
||||
for k in data1['data']:
|
||||
classes.append({'type_name': k['classifyTitle'], 'type_id': k['classifyId']})
|
||||
result['class'] = classes
|
||||
return result
|
||||
|
||||
def homeVideoContent(self):
|
||||
pass
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
|
||||
path=f'/api/short/video/getShortVideos?classifyId={tid}&videoMark=4&page={pg}&pageSize=20'
|
||||
result = {}
|
||||
videos = []
|
||||
data=self.fetch(f'{self.host}{path}', headers=self.headers()).json()['encData']
|
||||
vdata=self.aes(data)
|
||||
for k in vdata['data']:
|
||||
videos.append({"vod_id": k['videoId'], 'vod_name': k.get('title'), 'vod_pic': self.getProxyUrl() + '&url=' + k['coverImg'],
|
||||
'vod_remarks': self.dtim(k.get('playTime'))})
|
||||
result["list"] = videos
|
||||
result["page"] = pg
|
||||
result["pagecount"] = 9999
|
||||
result["limit"] = 90
|
||||
result["total"] = 999999
|
||||
return result
|
||||
|
||||
def detailContent(self, ids):
|
||||
path = f'/api/video/getVideoById?videoId={ids[0]}'
|
||||
data = self.fetch(f'{self.host}{path}', headers=self.headers()).json()['encData']
|
||||
v = self.aes(data)
|
||||
d=f'{v["title"]}$auth_key={v["authKey"]}&path={v["videoUrl"]}'
|
||||
vod = {'vod_name': v["title"], 'type_name': ''.join(v.get('tagTitles',[])),'vod_play_from': v.get('nickName') or "小红书官方", 'vod_play_url': d}
|
||||
result = {"list": [vod]}
|
||||
return result
|
||||
|
||||
def searchContent(self, key, quick, pg='1'):
|
||||
pass
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
|
||||
h=self.headers()
|
||||
h['Authorization'] = h.pop('aut')
|
||||
del h['deviceid']
|
||||
result = {"parse": 0, "url": f"{self.host}/api/m3u8/decode/authPath?{id}", "header": h}
|
||||
return result
|
||||
|
||||
def localProxy(self, param):
|
||||
return self.action(param)
|
||||
|
||||
def aes(self, word):
|
||||
key = b64decode("SmhiR2NpT2lKSVV6STFOaQ==")
|
||||
iv = key
|
||||
cipher = AES.new(key, AES.MODE_CBC, iv)
|
||||
decrypted = unpad(cipher.decrypt(b64decode(word)), AES.block_size)
|
||||
return json.loads(decrypted.decode('utf-8'))
|
||||
|
||||
def dtim(self, seconds):
|
||||
try:
|
||||
seconds = int(seconds)
|
||||
hours = seconds // 3600
|
||||
remaining_seconds = seconds % 3600
|
||||
minutes = remaining_seconds // 60
|
||||
remaining_seconds = remaining_seconds % 60
|
||||
|
||||
formatted_minutes = str(minutes).zfill(2)
|
||||
formatted_seconds = str(remaining_seconds).zfill(2)
|
||||
|
||||
if hours > 0:
|
||||
formatted_hours = str(hours).zfill(2)
|
||||
return f"{formatted_hours}:{formatted_minutes}:{formatted_seconds}"
|
||||
else:
|
||||
return f"{formatted_minutes}:{formatted_seconds}"
|
||||
except:
|
||||
return ''
|
||||
|
||||
def getsign(self):
    """Request signature: md5 of digits 3..7 of the current ms timestamp."""
    t=str(int(time.time() * 1000))
    return self.md5(t[3:8])

def gettoken(self):
    """Register as a guest device; returns (auth token, image CDN domain)."""
    url = f'{self.host}/api/user/traveler'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36;SuiRui/xhs/ver=1.2.6',
        'deviceid': self.did, 't': str(int(time.time() * 1000)), 's': self.getsign(), }
    data = {'deviceId': self.did, 'tt': 'U', 'code': '', 'chCode': 'dafe13'}
    data1 = self.post(url, json=data, headers=headers).json()
    data2 = data1['data']
    return data2['token'], data2['imgDomain']

# Fallback API host — presumably superseded by host_late(get_domains()) in
# init(); confirm against the missing helpers.
host = 'https://jhfkdnov21vfd.dyfcbkggxn.work'

def headers(self):
    """Common API headers: device id, fresh ms timestamp + signature, token."""
    henda = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36;SuiRui/xhs/ver=1.2.6',
        'deviceid': self.did, 't': str(int(time.time() * 1000)), 's': self.getsign(), 'aut': self.token}
    return henda

def action(self, param):
    """Local-proxy callback: fetch an obfuscated image from the preview host
    and return it XOR-deobfuscated (see img())."""
    headers = {
        'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)'}
    data = self.fetch(f'{self.phost}{param["url"]}', headers=headers)
    # Strip any "; charset=..." suffix from the content type.
    type=data.headers.get('Content-Type').split(';')[0]
    base64_data = self.img(data.content, 100, '2020-zq3-888')
    return [200, type, base64_data]
|
||||
def img(self, data: bytes, length: int, key: str) -> bytes:
    """Deobfuscate an image payload.

    Payloads that already carry a plain GIF/JPG/PNG signature are returned
    untouched; anything else has its first ``length`` bytes XOR-ed with the
    repeating ``key``.
    """
    GIF = b'\x47\x49\x46'
    JPG = b'\xFF\xD8\xFF'
    PNG = b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A'

    def is_dont_need_decode_for_gif(data):
        return len(data) > 2 and data[:3] == GIF

    def is_dont_need_decode_for_jpg(data):
        return len(data) > 7 and data[:3] == JPG

    def is_dont_need_decode_for_png(data):
        # Skip byte 0: some sources mangle it, but bytes 1..7 are stable.
        return len(data) > 7 and data[1:8] == PNG[1:8]

    if is_dont_need_decode_for_png(data):
        return data
    elif is_dont_need_decode_for_gif(data):
        return data
    elif is_dont_need_decode_for_jpg(data):
        return data
    else:
        key_bytes = key.encode('utf-8')
        result = bytearray(data)
        # BUG FIX: clamp to the payload size — the original XOR loop raised
        # IndexError on payloads shorter than `length` bytes.
        for i in range(min(length, len(result))):
            result[i] ^= key_bytes[i % len(key_bytes)]
        return bytes(result)
171
PY/小苹果APP.py
Normal file
171
PY/小苹果APP.py
Normal file
@ -0,0 +1,171 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import sys
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
from urllib.parse import quote
|
||||
|
||||
class Spider(Spider):
|
||||
def getName(self):
    # Spider identifier reported to the framework.
    return "xpg"

def init(self, extend=""):
    # No per-instance setup needed; host/header are class attributes.
    pass

def isVideoFormat(self, url):
    pass

def manualVideoCheck(self):
    pass

def destroy(self):
    pass
||||
|
||||
def homeContent(self, filter):
    """Build the category list and per-category filters from androidtypes.

    Injects a synthetic sort filter (time/hits/score) into every category
    alongside whatever class/area/year lists the API returns.
    """
    data = self.fetch(
        "{0}/api.php/v2.vod/androidtypes".format(self.host),
        headers=self.header,
    ).json()
    dy = {
        "classes": "类型",
        "areas": "地区",
        "years": "年份",
        "sortby": "排序",
    }
    filters = {}
    classes = []
    for item in data['data']:
        has_non_empty_field = False
        # BUG FIX: was item['soryby'] (typo), which never matched the
        # 'sortby' key in `dy`, so the sort filter was silently dropped
        # and the `demos` labels were never used.
        item['sortby'] = ['updatetime', 'hits', 'score']
        demos = ['时间', '人气', '评分']
        classes.append({"type_name": item["type_name"], "type_id": str(item["type_id"])})
        for key in dy:
            if key in item and len(item[key]) > 1:
                has_non_empty_field = True
                break
        if has_non_empty_field:
            filters[str(item["type_id"])] = []
            for dkey in item:
                if dkey in dy and len(item[dkey]) > 1:
                    values = item[dkey]
                    value_array = [
                        # sortby values get human labels; others display raw.
                        {"n": demos[idx] if dkey == "sortby" else value.strip(), "v": value.strip()}
                        for idx, value in enumerate(values)
                        if value.strip() != ""
                    ]
                    filters[str(item["type_id"])].append(
                        {"key": dkey, "name": dy[dkey], "value": value_array}
                    )
    result = {}
    result["class"] = classes
    result["filters"] = filters
    return result
|
||||
|
||||
# API host and static client-auth headers captured from the official app.
host = "http://item.xpgtv.com"
header = {
    'User-Agent': 'okhttp/3.12.11',
    # Hard-coded app tokens — presumably validated server-side and may
    # expire; TODO confirm they are still accepted.
    'token': 'ElEDlwCVgXcFHFhddiq2JKteHofExRBUrfNlmHrWetU3VVkxnzJAodl52N9EUFS+Dig2A/fBa/V9RuoOZRBjYvI+GW8kx3+xMlRecaZuECdb/3AdGkYpkjW3wCnpMQxf8vVeCz5zQLDr8l8bUChJiLLJLGsI+yiNskiJTZz9HiGBZhZuWh1mV1QgYah5CLTbSz8=',
    'token2': 'a0kEsBKRgTkBZ29NZ3WcNKN/C4T00RN/hNkmmGa5JMBeEENnqydLoetm/t8=',
    'user_id': 'XPGBOX',
    'version': 'XPGBOX com.phoenix.tv1.5.3',
    'timestamp': '1732286435',
    'hash': 'd9ab',
}
|
||||
|
||||
def homeVideoContent(self):
    """Home recommendations, flattened from the androidhome sections."""
    rsp = self.fetch("{0}/api.php/v2.main/androidhome".format(self.host), headers=self.header)
    sections = rsp.json()['data']['list']
    videos = [
        {
            "vod_id": item['id'],
            "vod_name": item['name'],
            "vod_pic": item['pic'],
            "vod_remarks": item['score'],
        }
        for section in sections
        for item in section['list']
    ]
    return {'list': videos}
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
    """List videos for category `tid`, applying area/year/sort/class filters."""
    parms = []
    parms.append(f"page={pg}")
    parms.append(f"type={tid}")
    # BUG FIX: the original read extend['areaes'] / extend['yeares'],
    # which raised KeyError whenever the area/year filter was selected.
    if extend.get('areas'):
        parms.append(f"area={quote(extend['areas'])}")
    if extend.get('years'):
        parms.append(f"year={quote(extend['years'])}")
    if extend.get('sortby'):
        parms.append(f"sortby={extend['sortby']}")
    if extend.get('classes'):
        parms.append(f"class={quote(extend['classes'])}")
    parms = "&".join(parms)
    result = {}
    url = '{0}/api.php/v2.vod/androidfilter10086?{1}'.format(self.host, parms)
    rsp = self.fetch(url, headers=self.header)
    root = rsp.json()['data']
    videos = []
    for vod in root:
        videos.append({
            "vod_id": vod['id'],
            "vod_name": vod['name'],
            "vod_pic": vod['pic'],
            "vod_remarks": vod['score']
        })
    result['list'] = videos
    result['page'] = pg
    # The API does not report totals; advertise "endless" paging.
    result['pagecount'] = 9999
    result['limit'] = 90
    result['total'] = 999999
    return result
||||
|
||||
def detailContent(self, ids):
    """Fetch vod detail and expose one play-from with m3u8 episode links."""
    id = ids[0]
    url = '{0}/api.php/v3.vod/androiddetail2?vod_id={1}'.format(self.host, id)
    rsp = self.fetch(url, headers=self.header)
    root = rsp.json()['data']
    node = root['urls']
    # Each entry's `url` is an id slotted into the fixed m3u8 CDN template.
    d = [it['key'] + "$" + f"http://c.xpgtv.net/m3u8/{it['url']}.m3u8" for it in node]
    vod = {
        "vod_name": root['name'],
        'vod_play_from': '小苹果',
        'vod_play_url': '#'.join(d),
    }
    # Removed leftover debug print(vod).
    result = {
        'list': [
            vod
        ]
    }
    return result
|
||||
|
||||
def searchContent(self, key, quick, pg='1'):
    """Keyword search via androidsearch10086 (same card shape as category)."""
    url = '{0}/api.php/v2.vod/androidsearch10086?page={1}&wd={2}'.format(self.host, pg, key)
    rsp = self.fetch(url, headers=self.header)
    root = rsp.json()['data']
    videos = []
    for vod in root:
        videos.append({
            "vod_id": vod['id'],
            "vod_name": vod['name'],
            "vod_pic": vod['pic'],
            "vod_remarks": vod['score']
        })
    result = {
        'list': videos
    }
    return result

def playerContent(self, flag, id, vipFlags):
    """Direct play: `id` is already the m3u8 url built in detailContent."""
    result = {}
    result["parse"] = 0
    result["url"] = id
    result["header"] = self.header
    return result

def localProxy(self, param):
    # No local proxying needed for this source.
    pass
|
||||
|
||||
|
220
PY/悠悠APP.py
Normal file
220
PY/悠悠APP.py
Normal file
@ -0,0 +1,220 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import re
|
||||
import sys
|
||||
from Crypto.Hash import MD5
|
||||
sys.path.append("..")
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Util.Padding import pad, unpad
|
||||
from urllib.parse import quote, urlparse
|
||||
from base64 import b64encode, b64decode
|
||||
import json
|
||||
import time
|
||||
from base.spider import Spider
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
    # Resolve the current API host from the remote bootstrap txt file.
    self.host = self.gethost()
    pass

def getName(self):
    pass


def isVideoFormat(self, url):
    pass

def manualVideoCheck(self):
    pass

def action(self, action):
    pass

def destroy(self):
    pass
||||
|
||||
def homeContent(self, filter):
    """Build classes, filters and the home list from the app init payload.

    NOTE(review): indentation reconstructed from a mangled diff — the
    nesting of the filter-building loops mirrors the sibling spiders'
    identical routine; confirm against the original file.
    """
    data = self.getdata("/api.php/getappapi.index/initV119")
    # Filter keys this UI knows how to render, with display labels.
    dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
          "sort": "排序"}
    filters = {}
    classes = []
    json_data = data["type_list"]
    # Home list: banners (minus the first 8) + every category's recommends.
    homedata = data["banner_list"][8:]
    for item in json_data:
        if item["type_name"] == "全部":
            continue
        has_non_empty_field = False
        jsontype_extend = json.loads(item["type_extend"])
        homedata.extend(item["recommend_list"])
        # Synthetic sort options appended to whatever the API declares.
        jsontype_extend["sort"] = "最新,最热,最赞"
        classes.append({"type_name": item["type_name"], "type_id": item["type_id"]})
        for key in dy:
            if key in jsontype_extend and jsontype_extend[key].strip() != "":
                has_non_empty_field = True
                break
        if has_non_empty_field:
            filters[str(item["type_id"])] = []
            for dkey in jsontype_extend:
                if dkey in dy and jsontype_extend[dkey].strip() != "":
                    values = jsontype_extend[dkey].split(",")
                    value_array = [{"n": value.strip(), "v": value.strip()} for value in values if
                                   value.strip() != ""]
                    filters[str(item["type_id"])].append({"key": dkey, "name": dy[dkey], "value": value_array})
    result = {}
    result["class"] = classes
    result["filters"] = filters
    result["list"] = homedata[1:]
    return result

def homeVideoContent(self):
    # Home videos are already delivered by homeContent's "list".
    pass
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
    """Filtered category listing via typeFilterVodList."""
    defaults = {
        'area': '全部',
        'year': '全部',
        'sort': '最新',
        'lang': '全部',
        'class': '全部',
    }
    body = {name: extend.get(name, fallback) for name, fallback in defaults.items()}
    body['type_id'] = tid
    body['page'] = pg
    data = self.getdata("/api.php/getappapi.index/typeFilterVodList", body)
    return {
        "list": data["recommend_list"],
        "page": pg,
        "pagecount": 9999,
        "limit": 90,
        "total": 999999,
    }
|
||||
|
||||
def detailContent(self, ids):
    """Vod detail: each playable item (plus its player's UA/parse info) is
    serialized to base64 JSON so playerContent can recover it later."""
    body = f"vod_id={ids[0]}"
    data = self.getdata("/api.php/getappapi.index/vodDetail", body)
    vod = data["vod"]
    play = []
    names = []
    for itt in data["vod_play_list"]:
        a = []
        names.append(itt["player_info"]["show"])
        for it in itt['urls']:
            # Carry per-source player hints along with the url.
            it['user_agent']=itt["player_info"].get("user_agent")
            it["parse"]=itt["player_info"].get("parse")
            a.append(f"{it['name']}${self.e64(json.dumps(it))}")
        play.append("#".join(a))
    vod["vod_play_from"] = "$$$".join(names)
    vod["vod_play_url"] = "$$$".join(play)
    result = {"list": [vod]}
    return result

def searchContent(self, key, quick, pg="1"):
    """Keyword search across all types (type_id=0)."""
    body = f"keywords={key}&type_id=0&page={pg}"
    data = self.getdata("/api.php/getappapi.index/searchList", body)
    result = {"list": data["search_list"], "page": pg}
    return result
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
    """Decode the base64 play item and resolve the real media url.

    Resolution order: a '?url=' style JSON parse API first, then the app's
    vodParse endpoint for non-direct links. On any failure the raw url is
    handed to the player with parse=1 (webview sniffing). Image-disguised
    urls are routed through the local m3u8 proxy.
    """
    ids = json.loads(self.d64(id))
    h={"User-Agent": (ids['user_agent'] or "okhttp/3.14.9")}
    url = ids['url']
    p=1
    try:
        if re.search(r'\?url=', ids['parse_api_url']):
            data=self.fetch(ids['parse_api_url'], headers=h, timeout=10).json()
            url=data.get('url') or data['data'].get('url')
        elif not re.search(r'\.m3u8|\.mp4', ids.get('url')):
            # The parse api is the prefix of parse_api_url before the raw url.
            body = f"parse_api={ids.get('parse') or ids['parse_api_url'].replace(ids['url'], '')}&url={quote(self.aes(ids['url'],True))}&token={ids.get('token')}"
            b = self.getdata("/api.php/getappapi.index/vodParse", body)['json']
            url = json.loads(b)['url']
        p=0
    except Exception as e:
        print('错误信息:',e)
        pass
    if re.search(r'\.jpg|\.png|\.jpeg', url):
        url = self.Mproxy(url)
    result = {}
    result["parse"] = p
    result["url"] = url
    result["header"] = h
    return result

def localProxy(self, param):
    # All proxy traffic goes through the m3u8 rewriter.
    return self.Mlocal(param)
||||
|
||||
def gethost(self):
    """Fetch the current API host from the remote bootstrap txt."""
    headers = {
        'User-Agent': 'okhttp/3.14.9'
    }
    host = self.fetch('http://host.yyys.news/250123.txt', headers=headers).text
    return host.strip()

# Headers used for direct media / proxy requests.
phend = {
    'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)',
    'allowCrossProtocolRedirects': 'true'
}
||||
|
||||
def aes(self, text,b=None):
    """AES-CBC with a fixed key doubling as IV.

    b truthy: encrypt `text` and return base64.
    b falsy:  base64-decode `text` and return the decrypted string.
    """
    key = b"RuN9LRvwTRgpQnpK"
    cipher = AES.new(key, AES.MODE_CBC, key)
    if b:
        ct_bytes = cipher.encrypt(pad(text.encode("utf-8"), AES.block_size))
        ct = b64encode(ct_bytes).decode("utf-8")
        return ct
    else :
        pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size)
        return pt.decode("utf-8")
||||
|
||||
def header(self):
    """Signed API headers: second-resolution timestamp, md5 device id and an
    AES-encrypted timestamp signature the server verifies."""
    t = str(int(time.time()))
    header = {"Referer":self.host,
              "User-Agent": "okhttp/3.14.9", "app-version-code": "547", "app-ui-mode": "light",
              "app-api-verify-time": t, "app-user-device-id": self.md5(t),
              "app-api-verify-sign": self.aes(t,True),
              "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"}
    return header

def getdata(self, path, data=None):
    """POST to the API and AES-decrypt the JSON `data` envelope."""
    vdata = self.post(f"{self.host}{path}", headers=self.header(), data=data, timeout=10).json()['data']
    data1 = self.aes(vdata)
    return json.loads(data1)
||||
|
||||
def Mproxy(self, url):
    """Wrap a media url into a local-proxy m3u8 url."""
    return f"{self.getProxyUrl()}&url={self.e64(url)}&type=m3u8"

def Mlocal(self, param, header=None):
    """Fetch an m3u8 (following one redirect) and absolutize relative lines.

    Lines that are neither tags nor absolute urls are prefixed with the
    playlist's scheme://host so the player can resolve segments.
    """
    url = self.d64(param["url"])
    ydata = self.fetch(url, headers=header, allow_redirects=False)
    data = ydata.content.decode('utf-8')
    if ydata.headers.get('Location'):
        url = ydata.headers['Location']
        data = self.fetch(url, headers=header).content.decode('utf-8')
    parsed_url = urlparse(url)
    durl = parsed_url.scheme + "://" + parsed_url.netloc
    lines = data.strip().split('\n')
    for index, string in enumerate(lines):
        if '#EXT' not in string and 'http' not in string:
            # BUG FIX: the original rebuilt the line from only its directory
            # part (string[:last_slash+1]), dropping the segment file name.
            # Prefix the host onto the full relative path instead.
            lines[index] = durl + ('' if string.startswith('/') else '/') + string
    data = '\n'.join(lines)
    return [200, "application/vnd.apple.mpegur", data]
|
||||
|
||||
def e64(self, text):
    """UTF-8 string -> base64 string; returns '' and logs on failure."""
    try:
        return b64encode(text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64编码错误: {str(e)}")
        return ""

def d64(self,encoded_text):
    """Base64 string -> UTF-8 string; returns '' and logs on failure."""
    try:
        return b64decode(encoded_text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64解码错误: {str(e)}")
        return ""
|
||||
def md5(self, text):
    """Hex MD5 digest of a UTF-8 string.

    Uses stdlib hashlib instead of Crypto.Hash.MD5 — identical output,
    one less third-party dependency on this path.
    """
    import hashlib
    return hashlib.md5(text.encode('utf-8')).hexdigest()
|
||||
|
216
PY/推特APP.py
Normal file
216
PY/推特APP.py
Normal file
@ -0,0 +1,216 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import json
|
||||
import sys
|
||||
import time
|
||||
from base64 import b64decode
|
||||
from urllib.parse import quote
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Hash import MD5
|
||||
from Crypto.Util.Padding import unpad
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def getName(self):
    # Spider identifier reported to the framework.
    return "tuit"

def init(self, extend=""):
    # Device id derives from the startup ms timestamp (class attr `t`);
    # then register as a guest to obtain the auth token.
    self.did = MD5.new((self.t).encode()).hexdigest()
    self.token = self.gettoken()
    pass

def isVideoFormat(self, url):
    pass

def manualVideoCheck(self):
    pass

def action(self, action):
    pass

def destroy(self):
    pass
||||
|
||||
def aes(self, word):
    """AES-CBC decrypt a base64 `encData` payload (key doubles as IV) and
    parse the plaintext as JSON."""
    key = b64decode("SmhiR2NpT2lKSVV6STFOaQ==")
    iv = key
    cipher = AES.new(key, AES.MODE_CBC, iv)
    decrypted = unpad(cipher.decrypt(b64decode(word)), AES.block_size)
    return json.loads(decrypted.decode('utf-8'))
||||
|
||||
def dtim(self, seconds):
    """Format a play time in seconds as MM:SS, or HH:MM:SS past one hour."""
    total = int(seconds)
    hours, rest = divmod(total, 3600)
    minutes, secs = divmod(rest, 60)
    if hours > 0:
        return f"{hours:02d}:{minutes:02d}:{secs:02d}"
    return f"{minutes:02d}:{secs:02d}"
|
||||
|
||||
def gettoken(self):
    """Guest login; returns the auth token later sent as `aut`."""
    url = 'https://d1frehx187fm2c.cloudfront.net/api/user/traveler'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36;SuiRui/twitter/ver=1.3.4',
        'deviceid': self.did, 't': self.t, 's': self.sign, }
    data = {'deviceId': self.did, 'tt': 'U', 'code': '', 'chCode': ''}
    data1 = self.post(url, json=data, headers=headers).json()
    token = data1['data']['token']
    return token

# Startup timestamp (ms), its md5 signature (digits 3..7), and the API host.
# Evaluated once at class creation, so all requests share the same t/sign.
t = str(int(time.time() * 1000))
sign = MD5.new((t[3:8]).encode()).hexdigest()
host = 'https://api.wcyfhknomg.work'
|
||||
def headers(self):
    """Common API headers: device id, startup timestamp/signature and token."""
    henda = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36;SuiRui/twitter/ver=1.3.4',
        'deviceid': self.did, 't': self.t, 's': self.sign, 'aut': self.token}
    return henda
||||
|
||||
def homeContent(self, filter):
    """Build the class list from the API and attach static filters.

    The per-class sort filter is identical for classes "1".."7"; it is
    generated once here instead of the original's seven hand-copied
    literals (same resulting structure).
    """
    data = self.fetch(f'{self.host}/api/video/classifyList', headers=self.headers()).json()['encData']
    data1 = self.aes(data)
    fl_filter = [{"key": "fl", "name": "分类",
                  "value": [{"n": "最近更新", "v": "1"}, {"n": "最多播放", "v": "2"},
                            {"n": "好评榜", "v": "3"}]}]
    filters = {str(i): fl_filter for i in range(1, 8)}
    filters["jx"] = [{"key": "type", "name": "精选",
                      "value": [{"n": "日榜", "v": "1"}, {"n": "周榜", "v": "2"},
                                {"n": "月榜", "v": "3"}, {"n": "总榜", "v": "4"}]}]
    result = {'filters': filters}
    classes = [{'type_name': "精选", 'type_id': "jx"}]
    for k in data1['data']:
        classes.append({'type_name': k['classifyTitle'], 'type_id': k['classifyId']})
    result['class'] = classes
    return result
||||
|
||||
def homeVideoContent(self):
    pass

def categoryContent(self, tid, pg, filter, extend):
    """Category listing with three routes: per-classify listing, an author's
    uploads (tid suffixed 'click'), and the 精选 rank board (tid 'jx')."""
    path = f'/api/video/queryVideoByClassifyId?pageSize=20&page={pg}&classifyId={tid}&sortType={extend.get("fl", "1")}'
    if 'click' in tid:
        path = f'/api/video/queryPersonVideoByType?pageSize=20&page={pg}&userId={tid.replace("click", "")}'
    if tid == 'jx':
        path = f'/api/video/getRankVideos?pageSize=20&page={pg}&type={extend.get("type", "1")}'
    data = self.fetch(f'{self.host}{path}', headers=self.headers()).json()['encData']
    data1 = self.aes(data)['data']
    result = {}
    videos = []
    for k in data1:
        img = 'https://dg2ordyr4k5v3.cloudfront.net/' + k.get('coverImg')[0]
        # vod_id packs videoId?userId?nickName so detailContent can build
        # the author link; 'click' suffix marks author-page entries.
        id = f'{k.get("videoId")}?{k.get("userId")}?{k.get("nickName")}'
        if 'click' in tid:
            id = id + 'click'
        videos.append({"vod_id": id, 'vod_name': k.get('title'), 'vod_pic': self.getProxyUrl() + '&url=' + img,
                       'vod_remarks': self.dtim(k.get('playTime')),'style': {"type": "rect", "ratio": 1.33}})
    result["list"] = videos
    result["page"] = pg
    result["pagecount"] = 9999
    result["limit"] = 90
    result["total"] = 999999
    return result
||||
|
||||
def detailContent(self, ids):
    """Resolve the playable url; vod_director carries a clickable author
    link (its id gets the 'click' suffix that routes categoryContent to
    the author's uploads)."""
    vid = ids[0].replace('click', '').split('?')
    path = f'/api/video/can/watch?videoId={vid[0]}'
    data = self.fetch(f'{self.host}{path}', headers=self.headers()).json()['encData']
    data1 = self.aes(data)['playPath']
    clj = '[a=cr:' + json.dumps({'id': vid[1] + 'click', 'name': vid[2]}) + '/]' + vid[2] + '[/a]'
    if 'click' in ids[0]:
        # Already on the author page: show the plain name, no link.
        clj = vid[2]
    vod = {'vod_director': clj, 'vod_play_from': "推特", 'vod_play_url': vid[2] + "$" + data1}
    result = {"list": [vod]}
    return result

def searchContent(self, key, quick, pg='1'):
    """Keyword search; same card layout as categoryContent."""
    path = f'/api/search/keyWord?pageSize=20&page={pg}&searchWord={quote(key)}&searchType=1'
    data = self.fetch(f'{self.host}{path}', headers=self.headers()).json()['encData']
    data1 = self.aes(data)['videoList']
    result = {}
    videos = []
    for k in data1:
        img = 'https://dg2ordyr4k5v3.cloudfront.net/' + k.get('coverImg')[0]
        id = f'{k.get("videoId")}?{k.get("userId")}?{k.get("nickName")}'
        videos.append({"vod_id": id, 'vod_name': k.get('title'), 'vod_pic': self.getProxyUrl() + '&url=' + img,
                       'vod_remarks': self.dtim(k.get('playTime')), 'style': {"type": "rect", "ratio": 1.33}})
    result["list"] = videos
    result["page"] = pg
    result["pagecount"] = 9999
    result["limit"] = 90
    result["total"] = 999999
    return result
||||
|
||||
def playerContent(self, flag, id, vipFlags):
    """Direct play: `id` is already the resolved media url from detailContent."""
    ua = 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36;SuiRui/twitter/ver=1.3.4'
    return {"parse": 0, "url": id, "header": {'User-Agent': ua}}
||||
|
||||
def localProxy(self, param):
    # Local proxy is only used for obfuscated cover images.
    return self.imgs(param)

def imgs(self, param):
    """Fetch an obfuscated cover image and XOR-deobfuscate it (see img())."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36;SuiRui/twitter/ver=1.3.4'}
    url = param['url']
    # e.g. ".../cover.png_300" -> "png"
    type = url.split('.')[-1].split('_')[0]
    data = self.fetch(url,headers=headers).content
    bdata = self.img(data, 100, '2020-zq3-888')
    return [200, f'image/{type}', bdata]
||||
|
||||
def img(self, data: bytes, length: int, key: str) -> bytes:
    """Deobfuscate an image payload.

    Payloads that already carry a plain GIF/JPG/PNG signature are returned
    untouched; anything else has its first ``length`` bytes XOR-ed with the
    repeating ``key``.
    """
    GIF = b'\x47\x49\x46'
    JPG = b'\xFF\xD8\xFF'
    PNG = b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A'

    def is_dont_need_decode_for_gif(data):
        return len(data) > 2 and data[:3] == GIF

    def is_dont_need_decode_for_jpg(data):
        return len(data) > 7 and data[:3] == JPG

    def is_dont_need_decode_for_png(data):
        # Skip byte 0: some sources mangle it, but bytes 1..7 are stable.
        return len(data) > 7 and data[1:8] == PNG[1:8]

    if is_dont_need_decode_for_png(data):
        return data
    elif is_dont_need_decode_for_gif(data):
        return data
    elif is_dont_need_decode_for_jpg(data):
        return data
    else:
        key_bytes = key.encode('utf-8')
        result = bytearray(data)
        # BUG FIX: clamp to the payload size — the original XOR loop raised
        # IndexError on payloads shorter than `length` bytes.
        for i in range(min(length, len(result))):
            result[i] ^= key_bytes[i % len(key_bytes)]
        return bytes(result)
349
PY/浴火社APP.py
Normal file
349
PY/浴火社APP.py
Normal file
@ -0,0 +1,349 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
from base64 import b64decode, b64encode
|
||||
import requests
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Hash import MD5
|
||||
from Crypto.Util.Padding import unpad
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
    # Device id + guest token first, then resolve CDN domains: `phost` is
    # the fastest preview host, `bhost`/`names` list the original-quality
    # mirrors used as play-from lines.
    self.did = self.getdid()
    self.token=self.gettoken()
    domain=self.domain()
    self.phost=self.host_late(domain['domain_preview'])
    self.bhost=domain['domain_original']
    self.names=domain['name_original']
    pass

def getName(self):
    pass

def isVideoFormat(self, url):
    pass

def manualVideoCheck(self):
    pass

def destroy(self):
    pass

# API host and static headers for API requests.
host = 'https://lulu-api-92mizw.jcdwn.com'

headers = {
    'User-Agent': 'okhttp/4.11.0',
    'referer': 'https://app.nova-traffic-1688.com',
}
|
||||
|
||||
def homeContent(self, filter):
    """Build classes and filters from the tag/producer/region endpoints.

    NOTE(review): indentation reconstructed from a mangled diff; assumes
    tags/producers/regions each return a non-empty 'data' list and that
    regions['data'][0] is the default actor region — confirm upstream.
    Type-id suffixes ('_tag', '_sx') route categoryContent's dispatch.
    """
    BASE_CATEGORIES = [
        {'type_name': '片商', 'type_id': 'makers'},
        {'type_name': '演员', 'type_id': 'actor'}
    ]

    SORT_OPTIONS = {
        'key': 'sortby',
        'name': 'sortby',
        'value': [
            {'n': '最新', 'v': 'on_shelf_at'},
            {'n': '最热', 'v': 'hot'}
        ]
    }

    tags = self.getdata('/api/v1/video/tag?current=1&pageSize=100&level=1')
    producers = self.getdata('/api/v1/video/producer?current=1&pageSize=100&status=1')
    regions = self.getdata('/api/v1/video/region?current=1&pageSize=100')
    result = {'class': [], 'filters': {}}
    result['class'].extend(BASE_CATEGORIES)
    for category in BASE_CATEGORIES:
        result['filters'][category['type_id']] = [SORT_OPTIONS]
    if tags.get('data'):
        # First tag becomes the "发现" class; the rest become its filter.
        main_tag = tags['data'][0]
        result['class'].append({
            'type_name': '发现',
            'type_id': f'{main_tag["id"]}_tag'
        })
        tag_values = [
            {'n': tag['name'], 'v': f"{tag['id']}_tag"}
            for tag in tags['data'][1:]
            if tag.get('id')
        ]
        result['filters'][f'{main_tag["id"]}_tag'] = [
            {'key': 'tagtype', 'name': 'tagtype', 'value': tag_values},
            SORT_OPTIONS
        ]

    region_filter = {
        'key': 'region_ids',
        'name': 'region_ids',
        'value': [
            {'n': region['name'], 'v': region['id']}
            for region in regions['data'][1:]
            if region.get('id')
        ]
    }
    # First region id is kept as the default for actor queries.
    self.aid=regions['data'][0]['id']
    result['filters']['actor'].append({
        'key': 'region_id',
        'name': 'region_id',
        'value': region_filter['value'][:2]
    })
    complex_sort = {
        'key': 'sortby',
        'name': 'sortby',
        'value': [
            {'n': '综合', 'v': 'complex'},
            *SORT_OPTIONS['value']
        ]
    }
    producer_filters = [region_filter, complex_sort]
    for producer in producers['data']:
        result['class'].append({
            'type_name': producer['name'],
            'type_id': f'{producer["id"]}_sx'
        })
        result['filters'][f'{producer["id"]}_sx'] = producer_filters
    return result

def homeVideoContent(self):
    """Home feed: first page of the 'complex' (综合) sort."""
    data=self.getdata('/api/v1/video?current=1&pageSize=60&region_ids=&sortby=complex')
    return {'list':self.getlist(data)}
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
    """Dispatch a category listing by type-id marker.

    'actor' -> getact, '*_tag' -> gettag, '*_sx' (producer) -> getsx,
    'makers' -> getmake. Order matters: 'makers' contains neither 'act',
    'tag' nor 'sx', so it falls through to the 'make' branch.
    """
    if 'act' in tid:
        data=self.getact(tid, pg, filter, extend)
    elif 'tag' in tid:
        data=self.gettag(tid, pg, filter, extend)
    elif 'sx' in tid:
        data=self.getsx(tid, pg, filter, extend)
    elif 'make' in tid:
        data=self.getmake(tid, pg, filter, extend)
    else:
        # BUG FIX: an unrecognized tid previously left `data` unbound and
        # raised NameError; return an empty page instead.
        data = []
    result = {}
    result['list'] = data
    result['page'] = pg
    result['pagecount'] = 9999
    result['limit'] = 90
    result['total'] = 999999
    return result
||||
|
||||
def detailContent(self, ids):
    """Detail page: one play line per original-quality mirror; each play url
    carries '@@@<video id>' so localProxy can later fetch the AES key."""
    v=self.getdata(f'/api/v1/video?current=1&pageSize=1&id={ids[0]}&detail=1')
    v=v['data'][0]
    vod = {
        'vod_name': v.get('title'),
        'type_name': '/'.join(v.get('tag_names',[])),
        'vod_play_from': '浴火社',
        'vod_play_url': ''
    }
    p=[]
    for i,j in enumerate(self.bhost):
        p.append(f'{self.names[i]}${j}{v.get("highres_url") or v.get("preview_url")}@@@{v["id"]}')
    vod['vod_play_url'] = '#'.join(p)
    return {'list':[vod]}

def searchContent(self, key, quick, pg="1"):
    """Title search."""
    data=self.getdata(f'/api/v1/video?current={pg}&pageSize=30&title={key}')
    return {'list':self.getlist(data),'page':pg}

def playerContent(self, flag, id, vipFlags):
    """Route playback through the local proxy so the m3u8 gets rewritten."""
    url=f'{self.getProxyUrl()}&url={self.e64(id)}&type=m3u8'
    return {'parse': 0, 'url': url, 'header': self.headers}
||||
|
||||
def localProxy(self, param):
    """Local proxy with three modes:

    - image: body is base64 text -> decode to raw image bytes
    - m3u8:  rewrite the EXT-X-KEY URI to this proxy (mkey mode) and
             absolutize relative segment paths
    - mkey:  fetch the video's AES key using the auth token
    """
    if param.get('type')=='image':
        data=self.fetch(param.get('url'), headers=self.headers).text
        content=b64decode(data.encode('utf-8'))
        return [200, 'image/png', content]
    if param.get('type')=='m3u8':
        # url payload is '<playlist url>@@@<video id>' (see detailContent).
        ids=self.d64(param.get('url')).split('@@@')
        data=self.fetch(ids[0], headers=self.headers).text
        lines = data.strip().split('\n')
        for index, string in enumerate(lines):
            if 'URI=' in string:
                # Point the key URI at our own mkey handler below.
                replacement = f'URI="{self.getProxyUrl()}&id={ids[1]}&type=mkey"'
                lines[index]=re.sub(r'URI="[^"]+"', replacement, string)
                continue
            if '#EXT' not in string and 'http' not in string:
                # Resolve relative segment paths against the playlist url.
                last_slash_index = ids[0].rfind('/')
                lpath = ids[0][:last_slash_index + 1]
                lines[index] = f'{lpath}{string}'
        data = '\n'.join(lines)
        return [200, 'audio/x-mpegurl', data]
    if param.get('type')=='mkey':
        id=param.get('id')
        headers = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
            'authdog': self.token
        }
        response = self.fetch(f'{self.host}/api/v1/video/key/{id}', headers=headers)
        type=response.headers.get('Content-Type')
        return [200, type, response.content]
||||
|
||||
def e64(self, text):
    """UTF-8 string -> base64 string; returns '' and logs on failure."""
    try:
        return b64encode(text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64编码错误: {str(e)}")
        return ""

def d64(self,encoded_text):
    """Base64 string -> UTF-8 string; returns '' and logs on failure."""
    try:
        return b64decode(encoded_text.encode('utf-8')).decode('utf-8')
    except Exception as e:
        print(f"Base64解码错误: {str(e)}")
        return ""
||||
|
||||
def getdid(self):
    """Stable device id: cached md5 of the first-run ms timestamp.

    getCache may raise on a cold store — fall back to (re)seeding it.
    """
    did = self.md5(str(int(time.time() * 1000)))
    try:
        if self.getCache('did'):
            return self.getCache('did')
        else:
            self.setCache('did', did)
            return did
    except Exception as e:
        self.setCache('did', did)
        return did
||||
|
||||
def host_late(self, url_list):
    """Pick the lowest-latency host from a list (or comma-separated string)
    by HEAD-probing every candidate concurrently; unreachable hosts rank
    last (infinite latency)."""
    if isinstance(url_list, str):
        candidates = [u.strip() for u in url_list.split(',')]
    else:
        candidates = url_list
    if len(candidates) <= 1:
        return candidates[0] if candidates else ''

    latency = {}

    def probe(target):
        try:
            started = time.time()
            requests.head(target, timeout=1.0, allow_redirects=False)
            latency[target] = (time.time() - started) * 1000
        except Exception:
            latency[target] = float('inf')

    workers = [threading.Thread(target=probe, args=(target,)) for target in candidates]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    return min(latency.items(), key=lambda item: item[1])[0]
||||
|
||||
def domain(self):
    """Fetch the encrypted domain list from the API and return it decoded."""
    ua = 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36'
    resp = self.fetch(f'{self.host}/api/v1/system/domain', headers={'User-Agent': ua})
    return self.aes(resp.content)
|
||||
|
||||
def aes(self, word):
    """AES-CBC decrypt *word* (raw response bytes) with the app's fixed
    key/iv, strip the padding, and parse the plaintext as JSON."""
    key = b64decode("amtvaWc5ZnJ2Ym5taml1eQ==")  # fixed key shipped with the app
    iv = b64decode("AAEFAwQFCQcICQoLDA0ODw==")   # fixed iv shipped with the app
    plaintext = unpad(AES.new(key, AES.MODE_CBC, iv).decrypt(word), AES.block_size)
    return json.loads(plaintext.decode('utf-8'))
|
||||
|
||||
def md5(self, text):
    """Return the hex MD5 digest of *text* (UTF-8).

    Bug fix: the original used ``MD5.new()`` from ``Crypto.Hash`` but this
    module never imports ``MD5``, so every call raised ``NameError``. The
    stdlib ``hashlib`` produces identical digests and needs no extra
    dependency.
    """
    import hashlib
    return hashlib.md5(text.encode('utf-8')).hexdigest()
|
||||
|
||||
def gettoken(self):
    """Register this device with the API and return an Authorization value
    of the form '<token_type> <access_token>' (decoded from the AES reply)."""
    hdrs = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
        # NOTE(review): header name 'cookei' looks like a typo for 'cookie',
        # but the server presumably expects it spelled this way — kept as-is.
        'cookei': self.md5(f'{self.did}+android'),
        'siteid': '11',
        'siteauthority': 'lls888.tv'
    }
    payload = {
        'app_id': 'jukjoe.zqgpi.hfzvde.sdot',
        'phone_device': 'Redmi M2012K10C',
        'device_id': self.did,
        'device_type': 'android',
        'invite_code': 'oi1o',
        'is_first': 1,
        'os_version': '11',
        'version': '8.59',
    }
    reply = self.post(f'{self.host}/api/v1/member/device', headers=hdrs, json=payload)
    tdata = self.aes(reply.content)
    return f'{tdata["token_type"]} {tdata["access_token"]}'
|
||||
|
||||
def getdata(self, path):
    """GET an authorised API *path* and return the AES-decrypted JSON payload."""
    hdrs = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
        'authdog': self.token
    }
    return self.aes(self.fetch(f'{self.host}{path}', headers=hdrs).content)
|
||||
|
||||
def getimg(self, path):
    """Build a local-proxy image URL for *path* on the picture host,
    normalising the path to start with a single '/'."""
    normalized = path if path.startswith('/') else f'/{path}'
    return f'{self.getProxyUrl()}&url={self.phost}{normalized}&type=image'
|
||||
|
||||
def getlist(self, data):
    """Convert an API video listing into the app's vod card list.

    Bug fix: the original read ``i.get('coverphoto_h' or i.get('coverphoto_v'))``
    — the ``or`` combined the two key *expressions*, so it always looked up
    'coverphoto_h' and the vertical-cover fallback was dead code. Now the
    horizontal cover is tried first and the vertical one used when missing.
    """
    videos = []
    for i in data['data']:
        videos.append({
            'vod_id': i['id'],
            'vod_name': i['title'],
            'vod_pic': self.getimg(i.get('coverphoto_h') or i.get('coverphoto_v')),
            'style': {"type": "rect", "ratio": 1.33}})
    return videos
|
||||
|
||||
def geticon(self, data, st='', style=None):
    """Build folder-style vod cards (actors/tags/producers) from *data*.

    ``st`` is a suffix appended to each id so later routing can tell the
    card kinds apart; ``style`` defaults to an oval icon.
    """
    if style is None:
        style = {"type": "oval"}
    cards = []
    for entry in data['data']:
        cards.append({
            'vod_id': f'{entry["id"]}{st}',
            'vod_name': entry['name'],
            'vod_pic': self.getimg(entry.get('icon_path')),
            'vod_tag': 'folder',
            'style': style})
    return cards
|
||||
|
||||
def getact(self, tid, pg, filter, extend):
    """Route actor browsing: page 1 of 'actor' lists actor icons; a
    '<id>_act' tid lists that actor's videos. Other inputs yield None."""
    if tid == 'actor' and pg == '1':
        region = extend.get("region_id", self.aid)
        icons = self.getdata(f'/api/v1/video/actor?current=1&pageSize=999&region_id={region}&discover_page={pg}')
        return self.geticon(icons, '_act')
    if '_act' in tid:
        actor_id = tid.split("_")[0]
        sort = extend.get("sortby", "on_shelf_at")
        listing = self.getdata(f'/api/v1/video?current={pg}&pageSize=50&actor_ids={actor_id}&sortby={sort}')
        return self.getlist(listing)
|
||||
|
||||
def gettag(self, tid, pg, filter, extend):
    """Route tag browsing: '<id>_tag' lists second-level tags; '<id>_stag'
    lists the videos under a tag. Other inputs yield None."""
    if '_tag' in tid:
        tid = extend.get('tagtype', tid)
        tags = self.getdata(f'/api/v1/video/tag?current={pg}&pageSize=100&level=2&parent_id={tid.split("_")[0]}')
        return self.geticon(tags, '_stag', {"type": "rect", "ratio": 1.33})
    if '_stag' in tid:
        tag_id = tid.split("_")[0]
        sort = extend.get("sortby", "on_shelf_at")
        listing = self.getdata(f'/api/v1/video?current={pg}&pageSize=50&tag_ids={tag_id}&sortby={sort}')
        return self.getlist(listing)
|
||||
|
||||
def getsx(self, tid, pg, filter, extend):
    """List a producer's videos, optionally filtered by region and sort order."""
    producer = tid.split("_")[0]
    query = (f'/api/v1/video?current={pg}&pageSize=20&producer_ids={producer}'
             f'&region_ids={extend.get("region_ids","")}&sortby={extend.get("sortby","complex")}')
    return self.getlist(self.getdata(query))
|
||||
|
||||
def getmake(self, tid, pg, filter, extend):
    """First page only: list producer icons; later pages yield None."""
    if pg != '1':
        return None
    producers = self.getdata('/api/v1/video/producer?current=1&pageSize=100&status=1')
    return self.geticon(producers, '_sx', {"type": "rect", "ratio": 1.33})
|
||||
|
301
PY/火车太顺APP.py
Normal file
301
PY/火车太顺APP.py
Normal file
@ -0,0 +1,301 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import sys
|
||||
from urllib.parse import urlparse
|
||||
sys.path.append("..")
|
||||
import re
|
||||
import hashlib
|
||||
import hmac
|
||||
import random
|
||||
import string
|
||||
from Crypto.Util.Padding import unpad
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from Crypto.PublicKey import RSA
|
||||
from Crypto.Cipher import PKCS1_v1_5, AES
|
||||
from base64 import b64encode, b64decode
|
||||
import json
|
||||
import time
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
    """TVBox spider for the '火车太顺' app API.

    Request bodies are RSA-encrypted into a ``pack`` query parameter and
    HMAC-MD5 signed (see ``url``); the /app/config response is AES-CBC
    encrypted (see ``aes``). The API host is resolved at startup via
    DNS-over-HTTPS with a hard-coded fallback (see ``gethost``).
    """

    def init(self, extend=""):
        # Mint a fresh random device id and resolve the API host once per session.
        self.device = self.device_id()
        self.host = self.gethost()
        pass

    def getName(self):
        # Unused hook required by the base Spider interface.
        pass

    def isVideoFormat(self, url):
        # Unused hook required by the base Spider interface.
        pass

    def manualVideoCheck(self):
        # Unused hook required by the base Spider interface.
        pass

    def action(self, action):
        # Unused hook required by the base Spider interface.
        pass

    def destroy(self):
        # No resources to release.
        pass

    def homeContent(self, filter):
        """Build the category list and per-category filter definitions from
        the (AES-encrypted) app config endpoint."""
        result = {}
        filters = {}
        classes = []
        bba = self.url()
        data = self.fetch(f"{self.host}/api/v1/app/config?pack={bba[0]}&signature={bba[1]}", headers=self.header()).text
        data1 = self.aes(data)
        # Maps the API's filter field names to their display labels.
        dy = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
              "sort": "排序"}
        # Drop the first sort entry (presumably an 'all/default' placeholder — TODO confirm).
        data1['data']['movie_screen']['sort'].pop(0)
        for item in data1['data']['movie_screen']['sort']:
            # Rename name/value to the n/v keys the TVBox filter schema expects.
            item['n'] = item.pop('name')
            item['v'] = item.pop('value')
        for item in data1['data']['movie_screen']['filter']:
            has_non_empty_field = False
            classes.append({"type_name": item["name"], "type_id": str(item["id"])})
            # Only emit a filter group for categories with at least one populated field.
            for key in dy:
                if key in item and item[key]:
                    has_non_empty_field = True
                    break
            if has_non_empty_field:
                filters[str(item["id"])] = []
                filters[str(item["id"])].append(
                    {"key": 'sort', "name": '排序', "value": data1['data']['movie_screen']['sort']})
                for dkey in item:
                    if dkey in dy and item[dkey]:
                        # First option is dropped here too (placeholder — TODO confirm).
                        item[dkey].pop(0)
                        value_array = [
                            {"n": value.strip(), "v": value.strip()}
                            for value in item[dkey]
                            if value.strip() != ""
                        ]
                        filters[str(item["id"])].append(
                            {"key": dkey, "name": dy[dkey], "value": value_array}
                        )
        result["class"] = classes
        result["filters"] = filters
        return result

    def homeVideoContent(self):
        """Return the home-page recommendation cards; malformed items are skipped."""
        bba = self.url()
        url = f'{self.host}/api/v1/movie/index_recommend?pack={bba[0]}&signature={bba[1]}'
        data = self.fetch(url, headers=self.header()).json()
        videos = []
        for item in data['data']:
            if len(item['list']) > 0:
                for it in item['list']:
                    try:
                        videos.append(self.voides(it))
                    except Exception as e:
                        # voides may raise on items missing name/title; skip them.
                        continue
        result = {"list": videos}
        return result

    def categoryContent(self, tid, pg, filter, extend):
        """List one page of a category, applying the filter selections in *extend*.

        pagecount/total are sentinels — the client pages until an empty list.
        """
        body = {"type_id": tid, "sort": extend.get("sort", "by_default"), "class": extend.get("class", "类型"),
                "area": extend.get("area", "地区"), "year": extend.get("year", "年份"), "page": str(pg),
                "pageSize": "21"}
        result = {}
        # NOTE: 'list' shadows the builtin within this method body.
        list = []
        bba = self.url(body)
        url = f"{self.host}/api/v1/movie/screen/list?pack={bba[0]}&signature={bba[1]}"
        data = self.fetch(url, headers=self.header()).json()['data']['list']
        for item in data:
            # NOTE(review): unlike the other callers, voides is not wrapped in
            # try/except here — an item without name/title would raise KeyError.
            list.append(self.voides(item))
        result["list"] = list
        result["page"] = pg
        result["pagecount"] = 9999
        result["limit"] = 90
        result["total"] = 999999
        return result

    def detailContent(self, ids):
        """Fetch movie detail plus all play sources.

        Sources whose episode list is inlined are used directly; empty ones
        are fetched concurrently via ``playlist`` (order of the fetched group
        follows task submission order).
        """
        body = {"id": ids[0]}
        bba = self.url(body)
        url = f'{self.host}/api/v1/movie/detail?pack={bba[0]}&signature={bba[1]}'
        data = self.fetch(url, headers=self.header()).json()['data']
        # NOTE(review): 'dynami' looks like a truncated key ('dynamic'?) — confirm
        # against a live response before changing.
        video = {'vod_name': data.get('name'), 'type_name': data.get('type_name'), 'vod_year': data.get('year'),
                 'vod_area': data.get('area'), 'vod_remarks': data.get('dynami'), 'vod_content': data.get('content')}
        play = []
        names = []
        tasks = []
        for itt in data["play_from"]:
            name = itt["name"]
            a = []  # NOTE(review): unused variable
            if len(itt["list"]) > 0:
                names.append(name)
                play.append(self.playeach(itt['list']))
            else:
                tasks.append({"movie_id": ids[0], "from_code": itt["code"]})
                names.append(name)
        if tasks:
            with ThreadPoolExecutor(max_workers=len(tasks)) as executor:
                results = executor.map(self.playlist, tasks)
                for result in results:
                    if result:
                        play.append(result)
                    else:
                        play.append("")
        video["vod_play_from"] = "$$$".join(names)
        video["vod_play_url"] = "$$$".join(play)
        result = {"list": [video]}
        return result

    def searchContent(self, key, quick, pg=1):
        """Search movies by name; malformed result items are skipped."""
        body = {"keyword": key, "sort": "", "type_id": "0", "page": str(pg), "pageSize": "10",
                "res_type": "by_movie_name"}
        bba = self.url(body)
        url = f"{self.host}/api/v1/movie/search?pack={bba[0]}&signature={bba[1]}"
        data = self.fetch(url, headers=self.header()).json()['data'].get('list')
        videos = []
        for it in data:
            try:
                videos.append(self.voides(it))
            except Exception as e:
                continue
        result = {"list": videos, "page": pg}
        return result

    def playerContent(self, flag, id, vipFlags):
        """Resolve a play id to a direct URL.

        ``id`` is either a direct media URL or a base64-encoded JSON task that
        must be resolved through the parse_url endpoint. Disguised playlists
        (.jpg/.png/.jpeg) are routed through the local proxy.
        """
        url = id
        if not re.search(r"\.m3u8|\.mp4", url):
            try:
                data = json.loads(b64decode(id.encode('utf-8')).decode('utf-8'))
                bba = self.url(data)
                data2 = self.fetch(f"{self.host}/api/v1/movie_addr/parse_url?pack={bba[0]}&signature={bba[1]}",
                                   headers=self.header()).json()['data']
                # NOTE(review): if both keys are missing, url becomes None and the
                # re.search below would raise TypeError — confirm the API contract.
                url = data2.get('play_url') or data2.get('download_url')
            except Exception as e:
                pass  # best-effort: fall back to the original id
        if re.search(r'\.jpg|\.png|\.jpeg', url):
            url = self.Mproxy(url)
        result = {}
        result["parse"] = 0
        result["url"] = url
        result["header"] = {'user-agent': 'okhttp/4.9.2'}
        return result

    def localProxy(self, param):
        # All local-proxy traffic is handled by Mlocal.
        return self.Mlocal(param)

    def Mproxy(self, url):
        """Wrap *url* in a local-proxy m3u8 request (url carried base64-encoded)."""
        return self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"

    def Mlocal(self, param,header=None):
        """Fetch a (possibly redirected) playlist and absolutize its relative lines.

        Returns the [status, content-type, body] triple the proxy framework expects.
        """
        # NOTE(review): self.d64 is not defined in this file — presumably
        # inherited from base.spider; verify.
        url = self.d64(param["url"])
        ydata = self.fetch(url, headers=header, allow_redirects=False)
        data = ydata.content.decode('utf-8')
        if ydata.headers.get('Location'):
            # Follow a single redirect manually so we know the final base URL.
            url = ydata.headers['Location']
            data = self.fetch(url, headers=header).content.decode('utf-8')
        parsed_url = urlparse(url)
        durl = parsed_url.scheme + "://" + parsed_url.netloc
        lines = data.strip().split('\n')
        # NOTE: the loop variable 'string' shadows the imported string module here.
        for index, string in enumerate(lines):
            if '#EXT' not in string and 'http' not in string:
                last_slash_index = string.rfind('/')
                lpath = string[:last_slash_index + 1]
                # NOTE(review): this keeps only the path *prefix* (up to the last
                # '/'), dropping the segment file name — looks suspicious; confirm
                # against a real playlist before changing.
                lines[index] = durl + ('' if lpath.startswith('/') else '/') + lpath
        data = '\n'.join(lines)
        # NOTE(review): content type misspells 'mpegurl' — confirm clients accept it before changing.
        return [200, "application/vnd.apple.mpegur", data]

    def device_id(self):
        """Return a random 32-char lowercase alphanumeric device id."""
        characters = string.ascii_lowercase + string.digits
        random_string = ''.join(random.choices(characters, k=32))
        return random_string

    def gethost(self):
        """Resolve the API host via DNS-over-HTTPS (alidns); on any failure
        fall back to the hard-coded domain."""
        try:
            url = 'https://dns.alidns.com/dns-query'
            headers = {
                'User-Agent': 'okhttp/4.9.2',
                'Accept': 'application/dns-message'
            }
            params = {
                # Pre-built base64url DNS query for the API hostname.
                'dns': 'AAABAAABAAAAAAAACWJmbTExYXM5ZgdmdXFpeXVuAmNuAAAcAAE'
            }
            response = self.fetch(url, headers=headers, params=params)
            # Offset 12 skips the fixed-size DNS header to the question name.
            host=self.parse_dns_name(response.content, 12)
            return f"https://{host}"
        except:
            return "https://bfm11as9f.fuqiyun.cn"

    def parse_dns_name(self, data, offset):
        """Decode a length-prefixed DNS name from *data* starting at *offset*."""
        parts = []
        while True:
            length = data[offset]
            if length == 0:
                break
            offset += 1
            parts.append(data[offset:offset + length].decode('utf-8'))
            offset += length
        return '.'.join(parts)

    def header(self):
        """Common request headers identifying this client to the API."""
        headers = {
            'User-Agent': 'Android',
            'Accept': 'application/prs.55App.v2+json',
            'timestamp': str(int(time.time())),
            'x-client-setting': '{"pure-mode":0}',
            # NOTE(review): the assembled value is malformed JSON
            # ('{"device_id":<id>}, "type":1,...}'); the server apparently
            # accepts it, so it is kept byte-for-byte.
            'x-client-uuid': '{"device_id":' + self.device + '}, "type":1,"brand":"Redmi", "model":"M2012K10C", "system_version":30, "sdk_version":"3.1.0.7"}',
            'x-client-version': '3096 '
        }
        return headers

    def url(self, id=None):
        """Encrypt a request body dict into the (pack, signature) pair.

        The body (plus a timestamp) is RSA-PKCS1v1.5 encrypted with the app's
        public key, base64url-encoded without padding, then HMAC-MD5 signed
        with a static key. Note: mutates the passed-in dict (adds 'timestamp').
        """
        if not id:
            id = {}
        id["timestamp"] = str(int(time.time()))
        public_key = 'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA02F/kPg5A2NX4qZ5JSns+bjhVMCC6JbTiTKpbgNgiXU+Kkorg6Dj76gS68gB8llhbUKCXjIdygnHPrxVHWfzmzisq9P9awmXBkCk74Skglx2LKHa/mNz9ivg6YzQ5pQFUEWS0DfomGBXVtqvBlOXMCRxp69oWaMsnfjnBV+0J7vHbXzUIkqBLdXSNfM9Ag5qdRDrJC3CqB65EJ3ARWVzZTTcXSdMW9i3qzEZPawPNPe5yPYbMZIoXLcrqvEZnRK1oak67/ihf7iwPJqdc+68ZYEmmdqwunOvRdjq89fQMVelmqcRD9RYe08v+xDxG9Co9z7hcXGTsUquMxkh29uNawIDAQAB'
        encrypted_text = json.dumps(id)
        public_key = RSA.import_key(b64decode(public_key))
        cipher = PKCS1_v1_5.new(public_key)
        encrypted_message = cipher.encrypt(encrypted_text.encode('utf-8'))
        encrypted_message_base64 = b64encode(encrypted_message).decode('utf-8')
        # base64url without padding, as the server expects.
        result = encrypted_message_base64.replace('+', '-').replace('/', '_').replace('=', '')
        key = '635a580fcb5dc6e60caa39c31a7bde48'
        sign = hmac.new(key.encode(), result.encode(), hashlib.md5).hexdigest()
        return result, sign

    def playlist(self, body):
        """Fetch one lazy play-source's episode list; return [] on any failure."""
        try:
            bba = self.url(body)
            url = f'{self.host}/api/v1/movie_addr/list?pack={bba[0]}&signature={bba[1]}'
            data = self.fetch(url, headers=self.header()).json()['data']
            return self.playeach(data)
        except Exception:
            return []

    def playeach(self, data):
        """Render an episode list to 'name$url#name$url...'; non-direct URLs are
        packed as base64 JSON tasks for playerContent to resolve later."""
        play_urls = []
        for it in data:
            if re.search(r"mp4|m3u8", it["play_url"]):
                play_urls.append(f"{it['episode_name']}${it['play_url']}")
            else:
                vd={"from_code": it['from_code'], "play_url": it['play_url'], "episode_id": it['episode_id'], "type": "play"}
                play_urls.append(
                    f"{it['episode_name']}${b64encode(json.dumps(vd).encode('utf-8')).decode('utf-8')}"
                )
        return '#'.join(play_urls)

    def voides(self, item):
        """Map one API item to a vod card.

        NOTE(review): ``item['name'] or item['title']`` raises KeyError when
        'name' is absent; most callers rely on their surrounding try/except.
        Returns None (implicitly) when both fields are falsy.
        """
        if item['name'] or item['title']:
            voide = {
                "vod_id": item.get('id') or item.get('click'),
                'vod_name': item.get('name') or item.get('title'),
                'vod_pic': item.get('cover') or item.get('image'),
                'vod_year': item.get('year') or item.get('label'),
                'vod_remarks': item.get('dynamic') or item.get('sub_title')
            }
            return voide

    def aes(self, text):
        """Decrypt a base64url AES-CBC payload with the app's fixed key/iv and
        parse the plaintext as JSON."""
        # Restore standard base64 alphabet; extra '==' padding is tolerated.
        text = text.replace('-', '+').replace('_', '/') + '=='
        key = b"e6d5de5fcc51f53d"
        iv = b"2f13eef7dfc6c613"
        cipher = AES.new(key, AES.MODE_CBC, iv)
        pt = unpad(cipher.decrypt(b64decode(text)), AES.block_size).decode("utf-8")
        return json.loads(pt)
|
248
PY/爱.py
Normal file
248
PY/爱.py
Normal file
@ -0,0 +1,248 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import random
|
||||
import sys
|
||||
from base64 import b64encode, b64decode
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from urllib.parse import urlencode
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
    """TVBox spider for iqiyi (爱奇艺), built on the public web/mini-app JSON
    endpoints. Vod ids are '<qipuId>@<base64(pageUrl)>' so the page URL can be
    recovered later without another lookup.
    """

    def init(self, extend=""):
        # Random hex device id used by the web endpoints.
        self.did = self.random_str(32)
        pass

    def getName(self):
        # Unused hook required by the base Spider interface.
        pass

    def isVideoFormat(self, url):
        # Unused hook required by the base Spider interface.
        pass

    def manualVideoCheck(self):
        # Unused hook required by the base Spider interface.
        pass

    def destroy(self):
        # No resources to release.
        pass

    # Web site root (used for Origin/Referer).
    rhost = 'https://www.iqiyi.com'

    # Aggregation ("mesh") API host: recommendations, library, search.
    hhost='https://mesh.if.iqiyi.com'

    # Mini-app API host: detail/episode endpoints.
    dhost='https://miniapp.iqiyi.com'

    headers = {
        'Origin': rhost,
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36',
        'Referer': f'{rhost}/',
    }

    def homeContent(self, filter):
        """Build the fixed channel list and fetch each channel's filter groups
        concurrently via ``getf``."""
        result = {}
        # Channel display name -> iqiyi channel id.
        cateManual = {
            "全部": "1009",
            "电影": "1",
            "剧集": "2",
            "综艺": "6",
            "动漫": "4",
            "儿童": "15",
            "微剧": "35",
            "纪录片": "3"
        }
        classes = []
        filters = {}
        for k in cateManual:
            classes.append({
                'type_name': k,
                'type_id': cateManual[k]
            })
        with ThreadPoolExecutor(max_workers=len(classes)) as executor:
            results = executor.map(self.getf, classes)
        for id, ft in results:
            if len(ft):filters[id] = ft
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Home recommendations; the first row is skipped (presumably a banner
        strip — TODO confirm) and items without id+cover are dropped."""
        data=self.fetch(f'{self.hhost}/portal/lw/v5/channel/recommend?v=13.014.21150', headers=self.headers).json()
        vlist = []
        for i in data['items'][1:]:
            for j in i['video'][0]['data']:
                id = j.get('firstId')
                pic=j.get('prevue',{}).get('image_url') or j.get('album_image_url_hover')
                if id and pic:
                    # Prefer the prevue page URL; otherwise strip the query string.
                    pu=j.get('prevue',{}).get('page_url') or j.get('page_url').split('?')[0]
                    id = f'{id}@{self.e64(pu)}'
                    vlist.append({
                        'vod_id': id,
                        'vod_name': j.get('display_name'),
                        'vod_pic': pic,
                        'vod_year': j.get('sns_score'),
                        'vod_remarks': j.get('dq_updatestatus') or j.get('rank_prefix')
                    })
        return {'list':vlist}

    def categoryContent(self, tid, pg, filter, extend):
        """Page through the video library for channel *tid*.

        Filter values arrive base64-encoded as 'k=v[,k=v...]' pairs (see
        ``getf``) and are merged into the library query. The endpoint is
        session-based: the server-issued session token is kept in ``self.sid``
        between pages. NOTE(review): ``self.sid`` is only initialised when
        pg == "1" — a first call with a later page would hit an unset attribute.
        """
        if pg == "1":
            self.sid = ''
        new_data = {'mode':'24'}
        for key, value in extend.items():
            if value:
                key_value_pairs = self.d64(value).split(',')
                for pair in key_value_pairs:
                    k, v = pair.split('=')
                    if k in new_data:
                        # Same field selected more than once: join as CSV.
                        new_data[k] += "," + v
                    else:
                        new_data[k] = v
        path=f'/portal/lw/videolib/data?uid=&passport_id=&ret_num=60&version=13.014.21150&device_id={self.did}&channel_id={tid}&page_id={pg}&session={self.sid}&os=&conduit_id=&vip=0&auth&recent_selected_tag=&ad=%5B%7B%22lm%22:%225%22,%22ai%22:%225%22,%22fp%22:%226%22,%22sei%22:%22Sa867aa9d326e2bd8654d8c2a8636055e%22,%22position%22:%22library%22%7D%5D&adExt=%7B%22r%22:%221.2.1-ares6-pure%22%7D&dfp=a12f96215b2f7842a98c082799ca0c3d9236be00946701b106829754d8ece3aaf8&filter={urlencode(new_data)}'
        data=self.fetch(f'{self.hhost}{path}', headers=self.headers).json()
        self.sid = data['session']
        videos = []
        for i in data['data']:
            id = i.get('firstId') or i.get('tv_id')
            if not id:
                # Fall back to the id embedded in the play URL query string.
                id=i.get('play_url').split(';')[0].split('=')[-1]
            if id and not i.get('h'):
                id=f'{id}@{self.e64(i.get("page_url"))}'
                videos.append({
                    'vod_id': id,
                    'vod_name': i.get('display_name'),
                    'vod_pic': i.get('album_image_url_hover'),
                    'vod_year': i.get('sns_score'),
                    'vod_remarks': i.get('dq_updatestatus') or i.get('pay_mark')
                })
        result = {}
        result['list'] = videos
        result['page'] = pg
        # Open-ended paging sentinels; the client stops on an empty list.
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Build the detail record for '<qipuId>@<base64(pageUrl)>'.

        Album case: episode pages beyond the first are fetched concurrently and
        re-assembled in page order. Single-video case (no videoList): a second
        'head' endpoint supplies the metadata and the stored page URL becomes
        the only play entry.
        """
        ids = ids[0].split('@')
        ids[-1] = self.d64(ids[-1])
        data = self.fetch(f'{self.dhost}/h5/mina/baidu/play/body/v1/{ids[0]}/', headers=self.headers).json()
        v=data['data']['playInfo']
        vod = {
            'vod_name': v.get('albumName'),
            'type_name': v.get('tags'),
            'vod_year': v.get('albumYear'),
            'vod_remarks': v.get('updateStrategy'),
            'vod_actor': v.get('mainActors'),
            'vod_director': v.get('directors'),
            'vod_content': v.get('albumDesc'),
            'vod_play_from': '爱奇艺',
            'vod_play_url': ''
        }
        if data.get('data') and data['data'].get('videoList') and data['data']['videoList'].get('videos'):
            purl=[f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
            pg=data['data']['videoList'].get('totalPages')
            if pg and pg > 1:
                id = v['albumId']
                pages = list(range(2, pg + 1))
                page_results = {}
                with ThreadPoolExecutor(max_workers=10) as executor:
                    future_to_page = {
                        executor.submit(self.fetch_page_data, page, id): page
                        for page in pages
                    }
                    for future in as_completed(future_to_page):
                        page = future_to_page[future]
                        try:
                            result = future.result()
                            page_results[page] = result
                        except Exception as e:
                            print(f"Error fetching page {page}: {e}")
                # Reassemble in ascending page order regardless of completion order.
                for page in sorted(page_results.keys()):
                    purl.extend(page_results[page])
            vod['vod_play_url'] = '#'.join(purl)
        else:
            vdata=self.fetch(f'{self.dhost}/h5/mina/baidu/play/head/v1/{ids[0]}/', headers=self.headers).json()
            v=vdata['data']['playInfo']
            vod = {
                'vod_name': v.get('shortTitle'),
                'type_name': v.get('channelName'),
                'vod_year': v.get('year'),
                'vod_remarks': v.get('focus'),
                'vod_actor': v.get('mainActors'),
                'vod_director': v.get('directors'),
                'vod_content': v.get('desc'),
                'vod_play_from': '爱奇艺',
                'vod_play_url': f'{v.get("shortTitle")}${ids[-1]}'
            }
        return {'list':[vod]}

    def searchContent(self, key, quick, pg="1"):
        """Search; 'intent' album hits are flattened and promoted to the front
        of the normal template results."""
        data=self.fetch(f'{self.hhost}/portal/lw/search/homePageV3?key={key}&current_page={pg}&mode=1&source=input&suggest=&version=13.014.21150&pageNum={pg}&pageSize=25&pu=&u={self.did}&scale=150&token=&userVip=0&conduit=&vipType=-1&os=&osShortName=win10&dataType=&appMode=', headers=self.headers).json()
        videos = []
        vdata=data['data']['templates']
        for i in data['data']['templates']:
            if i.get('intentAlbumInfos'):
                # Wrap intent hits in the same shape as template entries and prepend.
                vdata=[{'albumInfo': c} for c in i['intentAlbumInfos']]+vdata

        for i in vdata:
            if i.get('albumInfo') and (i['albumInfo'].get('playQipuId','') or i['albumInfo'].get('qipuId')) and i['albumInfo'].get('pageUrl'):
                b=i['albumInfo']
                id=f"{(b.get('playQipuId','') or b.get('qipuId'))}@{self.e64(b.get('pageUrl'))}"
                videos.append({
                    'vod_id': id,
                    'vod_name': b.get('title'),
                    'vod_pic': b.get('img'),
                    'vod_year': (b.get('year',{}) or {}).get('value'),
                    'vod_remarks': b.get('subscriptContent') or b.get('channel') or b.get('vipTips')
                })
        return {'list':videos,'page':pg}

    def playerContent(self, flag, id, vipFlags):
        # jx/parse flags signal the host app to resolve the page URL itself.
        return {'jx':1,'parse': 1, 'url': id, 'header': ''}

    def localProxy(self, param):
        # No local proxying needed for this source.
        pass

    def fetch_page_data(self, page, id):
        """Fetch one episode-list page for album *id*; return [] on any failure."""
        try:
            url = f'{self.dhost}/h5/mina/avlist/{page}/{id}/'
            data = self.fetch(url, headers=self.headers).json()
            return [f'{i["shortTitle"]}${i["pageUrl"]}' for i in data['data']['videoList']['videos']]
        except:
            return []

    def getf(self,body):
        """Fetch the filter-tag groups for one channel; returns (type_id, groups).

        Each option value is the base64 of the raw 'k=v' tag_param, decoded
        again in categoryContent. Groups that fail to parse are printed and
        skipped.
        """
        data=self.fetch(f'{self.hhost}/portal/lw/videolib/tag?channel_id={body["type_id"]}&tagAdd=&selected_tag_name=&version=13.014.21150&device={self.did}&uid=', headers=self.headers).json()
        ft = []
        # for i in data[:-1]:
        for i in data:
            try:
                value_array = [{"n": value['text'], "v": self.e64(value['tag_param'])} for value in i['tags'] if
                               value.get('tag_param')]
                ft.append({"key": i['group'], "name": i['group'], "value": value_array})
            except:
                print(i)
        return (body['type_id'], ft)

    def e64(self, text):
        """Base64-encode *text* (UTF-8); return '' on any failure."""
        try:
            text_bytes = text.encode('utf-8')
            encoded_bytes = b64encode(text_bytes)
            return encoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64编码错误: {str(e)}")
            return ""

    def d64(self,encoded_text: str):
        """Decode a base64 string back to UTF-8 text; return '' on any failure."""
        try:
            encoded_bytes = encoded_text.encode('utf-8')
            decoded_bytes = b64decode(encoded_bytes)
            return decoded_bytes.decode('utf-8')
        except Exception as e:
            print(f"Base64解码错误: {str(e)}")
            return ""

    def random_str(self,length=16):
        """Return *length* random hex characters (init passes 32)."""
        hex_chars = '0123456789abcdef'
        return ''.join(random.choice(hex_chars) for _ in range(length))
|
165
PY/爱瓜TVAPP.py
Normal file
165
PY/爱瓜TVAPP.py
Normal file
@ -0,0 +1,165 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
# 温馨提示:搜索只能搜拼音联想
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
from Crypto.Hash import MD5
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
    """TVBox spider for the '爱瓜TV' app API.

    Every request body is stamped with common device params and an MD5 sign
    (see ``getbody``); a device-scoped token/authcode pair is obtained at
    startup (see ``getuserinfo``). Vod ids are '<channel_id>@<video_id>'.

    Bug fix in this revision: ``playerContent`` returned the literal string
    "id" instead of the resolved URL, so nothing was playable.
    """

    def init(self, extend=""):
        # Stable per-install uuid, then a device login for token + authcode.
        self.uid = self.getuid()
        self.token, self.code = self.getuserinfo()
        pass

    def getName(self):
        # Unused hook required by the base Spider interface.
        pass

    def isVideoFormat(self, url):
        # Unused hook required by the base Spider interface.
        pass

    def manualVideoCheck(self):
        # Unused hook required by the base Spider interface.
        pass

    def destroy(self):
        # No resources to release.
        pass

    host = 'https://tvapi211.magicetech.com'

    headers = {'User-Agent': 'okhttp/3.11.0'}

    def homeContent(self, filter):
        """Build channel classes and their filter groups from filter-header."""
        body = {'token': self.token, 'authcode': self.code}
        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/filter-header', json=self.getbody(body),
                         headers=self.headers).json()
        result = {}
        classes = []
        filters = {}
        for k in data['data']:
            classes.append({
                'type_name': k['channel_name'],
                'type_id': str(k['channel_id']),
            })
            filters[str(k['channel_id'])] = []
            for i in k['search_box']:
                if len(i['list']):
                    filters[str(k['channel_id'])].append({
                        'key': i['field'],
                        'name': i['label'],
                        'value': [{'n': j['display'], 'v': str(j['value'])} for j in i['list'] if j['value']]
                    })
        result['class'] = classes
        result['filters'] = filters
        return result

    def homeVideoContent(self):
        """Home page: the first section's banner items as vod cards."""
        body = {'token': self.token, 'authcode': self.code}
        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/index-tv', json=self.getbody(body),
                         headers=self.headers).json()
        return {'list': self.getlist(data['data'][0]['banner'])}

    def categoryContent(self, tid, pg, filter, extend):
        """List one page of channel *tid*, applying any filter selections.

        pagecount/total are sentinels — the client pages until an empty list.
        """
        body = {'token': self.token, 'authcode': self.code, 'channel_id': tid, 'area': extend.get('area', '0'),
                'year': extend.get('year', '0'), 'sort': extend.get('sort', '0'), 'tag': extend.get('tag', 'hot'),
                'status': extend.get('status', '0'), 'page_num': pg, 'page_size': '24'}
        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/filter-video', json=self.getbody(body),
                         headers=self.headers).json()
        result = {}
        result['list'] = self.getlist(data['data']['list'])
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        """Fetch the detail for '<channel_id>@<video_id>' and group episode
        URLs by source name into TVBox's from/url strings."""
        ids = ids[0].split('@')
        body = {'token': self.token, 'authcode': self.code, 'channel_id': ids[0], 'video_id': ids[1]}
        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/video/detail', json=self.getbody(body),
                         headers=self.headers).json()
        # source name -> ['<episode title>$<url>', ...]
        vdata = {}
        for k in data['data']['chapters']:
            i = k['sourcelist']
            for j in i:
                if j['source_name'] not in vdata: vdata[j['source_name']] = []
                vdata[j['source_name']].append(f"{k['title']}${j['source_url']}")
        plist, names = [], []
        for key, value in vdata.items():
            names.append(key)
            plist.append('#'.join(value))
        vod = {
            'vod_play_from': '$$$'.join(names),
            'vod_play_url': '$$$'.join(plist),
        }
        return {'list': [vod]}

    def searchContent(self, key, quick, pg="1"):
        """Letter/pinyin search (the backend only matches pinyin prefixes)."""
        body = {'token': self.token, 'authcode': self.code, 'keyword': key, 'page_num': pg}
        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/search/letter-result', json=self.getbody(body),
                         headers=self.headers).json()
        return {'list': self.getlist(data['data']['list'])}

    def playerContent(self, flag, id, vipFlags):
        """Return the direct play URL with the headers the CDN expects."""
        # https://rysp.tv
        # https://aigua.tv
        result = {
            "parse": 0,
            # Bug fix: was the literal string "id", which produced an
            # unplayable URL for every episode.
            "url": id,
            "header": {
                "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 11; M2012K10C Build/RP1A.200720.011)",
                "Origin": "https://aigua.tv",
                "Referer": "https://aigua.tv/"
            }
        }
        return result

    def localProxy(self, param):
        # No local proxying needed for this source.
        pass

    def getuserinfo(self):
        """Anonymous device login; returns (user_token, authcode)."""
        data = self.post(f'{self.host}/hr_1_1_0/apptvapi/web/index.php/user/auth-login', json=self.getbody(),
                         headers=self.headers).json()
        v = data['data']
        return v['user_token'], v['authcode']

    def getuid(self):
        """Return the cached install uuid, minting and caching one if absent."""
        uid = self.getCache('uid')
        if not uid:
            uid = str(uuid.uuid4())
            self.setCache('uid', uid)
        return uid

    def getbody(self, json_data=None):
        """Stamp *json_data* with the common device params and MD5 sign it.

        The sign is MD5 over the key-sorted 'k=v&...' string of non-empty
        fields wrapped in two static secrets, upper-cased. Mutates and returns
        the same dict.
        """
        if json_data is None: json_data = {}
        params = {"product": "4", "ver": "1.1.0", "debug": "1", "appId": "1", "osType": "3", "marketChannel": "tv",
                  "sysVer": "11", "time": str(int(time.time())), "packageName": "com.gzsptv.gztvvideo",
                  "udid": self.uid, }
        json_data.update(params)
        sorted_json = dict(sorted(json_data.items(), key=lambda item: item[0]))
        text = '&'.join(f"{k}={v}" for k, v in sorted_json.items() if v != '')
        md5_hash = self.md5(f"jI7POOBbmiUZ0lmi{text}D9ShYdN51ksWptpkTu11yenAJu7Zu3cR").upper()
        json_data.update({'sign': md5_hash})
        return json_data

    def md5(self, text):
        """Hex MD5 digest of *text* (UTF-8)."""
        h = MD5.new()
        h.update(text.encode('utf-8'))
        return h.hexdigest()

    def getlist(self, data):
        """Map API list items (possibly wrapped in a 'video' key) to vod cards."""
        videos = []
        for i in data:
            if type(i.get('video')) == dict: i = i['video']
            videos.append({
                'vod_id': f"{i.get('channel_id')}@{i.get('video_id')}",
                'vod_name': i.get('video_name'),
                'vod_pic': i.get('cover'),
                'vod_year': i.get('score'),
                'vod_remarks': i.get('flag'),
            })
        return videos
|
||||
return videos
|
||||
|
101
PY/甜圈短剧.py
Normal file
101
PY/甜圈短剧.py
Normal file
@ -0,0 +1,101 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import sys
|
||||
from pyquery import PyQuery as pq
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
    # No per-instance state needed; hosts/headers are class attributes.
    pass
|
||||
|
||||
def getName(self):
    # Unused hook required by the base Spider interface.
    pass
|
||||
|
||||
def isVideoFormat(self, url):
    # Unused hook required by the base Spider interface.
    pass
|
||||
|
||||
def manualVideoCheck(self):
    # Unused hook required by the base Spider interface.
    pass
|
||||
|
||||
def destroy(self):
|
||||
pass
|
||||
|
||||
host='https://mov.cenguigui.cn'
|
||||
|
||||
ahost='https://api.cenguigui.cn'
|
||||
|
||||
headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
|
||||
'sec-ch-ua-platform': '"macOS"',
|
||||
'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
|
||||
'DNT': '1',
|
||||
'sec-ch-ua-mobile': '?0',
|
||||
'Sec-Fetch-Site': 'cross-site',
|
||||
'Sec-Fetch-Mode': 'no-cors',
|
||||
'Sec-Fetch-Dest': 'video',
|
||||
'Sec-Fetch-Storage-Access': 'active',
|
||||
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
|
||||
}
|
||||
|
||||
def homeContent(self, filter):
|
||||
data=pq(self.fetch(self.host, headers=self.headers).text)
|
||||
result = {}
|
||||
classes = []
|
||||
for k in data('.overflow-auto button').items():
|
||||
classes.append({
|
||||
'type_name': k.text(),
|
||||
'type_id': k.text()
|
||||
})
|
||||
result['class'] = classes
|
||||
return result
|
||||
|
||||
def homeVideoContent(self):
|
||||
pass
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
|
||||
params = {
|
||||
'classname': tid,
|
||||
'offset': str((int(pg) - 1)),
|
||||
}
|
||||
data = self.fetch(f'{self.ahost}/api/duanju/api.php', params=params, headers=self.headers).json()
|
||||
videos = []
|
||||
for k in data['data']:
|
||||
videos.append({
|
||||
'vod_id': k.get('book_id'),
|
||||
'vod_name': k.get('title'),
|
||||
'vod_pic': k.get('cover'),
|
||||
'vod_year': k.get('score'),
|
||||
'vod_remarks': f"{k.get('sub_title')}|{k.get('episode_cnt')}"
|
||||
})
|
||||
result = {}
|
||||
result['list'] = videos
|
||||
result['page'] = pg
|
||||
result['pagecount'] = 9999
|
||||
result['limit'] = 90
|
||||
result['total'] = 999999
|
||||
return result
|
||||
|
||||
def detailContent(self, ids):
|
||||
v=self.fetch(f'{self.ahost}/api/duanju/api.php', params={'book_id': ids[0]}, headers=self.headers).json()
|
||||
vod = {
|
||||
'type_name': v.get('category'),
|
||||
'vod_year': v.get('time'),
|
||||
'vod_remarks': v.get('duration'),
|
||||
'vod_content': v.get('desc'),
|
||||
'vod_play_from': '嗷呜爱看短剧',
|
||||
'vod_play_url': '#'.join([f"{i['title']}${i['video_id']}" for i in v['data']])
|
||||
}
|
||||
return {'list':[vod]}
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
|
||||
return self.categoryContent(key, pg, True, {})
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
|
||||
data=self.fetch(f'{self.ahost}/api/duanju/api.php', params={'video_id': id}, headers=self.headers).json()
|
||||
return {'parse': 0, 'url': data['data']['url'], 'header': self.headers}
|
||||
|
||||
def localProxy(self, param):
|
||||
pass
|
127
PY/红果网页.py
Normal file
127
PY/红果网页.py
Normal file
@ -0,0 +1,127 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import re
|
||||
import sys
|
||||
from pyquery import PyQuery as pq
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
class Spider(Spider):
    """TVBox spider scraping the hongguodj.cc web site (红果网页)."""

    def init(self, extend=""):
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    host = 'https://www.hongguodj.cc'

    headers = {
        'Accept': '*/*',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'DNT': '1',
        'Origin': host,
        'Pragma': 'no-cache',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'cross-site',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
    }

    def homeContent(self, filter):
        """Build categories from the nav bar (first entry skipped: it is the
        home link) and seed the home list from every row block on the page."""
        doc = pq(self.fetch(self.host, headers=self.headers).text)
        cates = []
        for tab in list(doc('.slip li').items())[1:]:
            cates.append({
                'type_name': tab.text(),
                # the category id is the numeric part of the nav link
                'type_id': re.findall(r'\d+', tab('a').attr('href'))[0]
            })
        vids = []
        for row in doc('.wrap .rows').items():
            vids.extend(self.getlist(row('li')))
        return {'class': cates, 'list': vids}

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        doc = pq(self.fetch(f'{self.host}/type/{tid}-{pg}.html', headers=self.headers).text)
        return {
            'list': self.getlist(doc('.list ul li')),
            'page': pg,
            'pagecount': 9999,
            'limit': 90,
            'total': 999999
        }

    def detailContent(self, ids):
        """Scrape a detail page; the .info <p> tags hold, in order:
        actor(0), director(1), type(2), year(3), area(4)."""
        doc = pq(self.fetch(f'{self.host}{ids[0]}', headers=self.headers).text)
        info = doc('.info')
        rows = info('p')
        vod = {
            'vod_name': info('h1').text(),
            'type_name': rows.eq(2).text(),
            'vod_year': rows.eq(3).text(),
            'vod_area': rows.eq(4).text(),
            'vod_remarks': info('em').text(),
            'vod_actor': rows.eq(0).text(),
            'vod_director': rows.eq(1).text(),
            'vod_content': doc('#desc .text').text(),
            'vod_play_from': '',
            'vod_play_url': ''
        }
        sources = [tab.text() for tab in doc('.title.slip a').items()]
        groups = []
        for ul in doc('.play-list ul').items():
            eps = [f'{li("a").text()}${li("a").attr("href")}' for li in ul('li').items()]
            groups.append('#'.join(eps))
        vod['vod_play_from'] = '$$$'.join(sources)
        vod['vod_play_url'] = '$$$'.join(groups)
        return {'list': [vod]}

    def searchContent(self, key, quick, pg="1"):
        doc = pq(self.fetch(f'{self.host}/search/{key}----------{pg}---.html', headers=self.headers).text)
        return {'list': self.getlist(doc('.show.rows li')), 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        """Read the direct play url from the player element; fall back to
        handing the page itself to the client-side parser."""
        page_url = f'{self.host}{id}'
        doc = pq(self.fetch(page_url, headers=self.headers).text)
        url = doc('.video.ratio').attr('data-play')
        parse = 0
        if not url:
            url = page_url
            parse = 1
        return {'parse': parse, 'url': url, 'header': self.headers}

    def localProxy(self, param):
        pass

    def getlist(self, data):
        """Map listing <li> elements to vod dicts (lazy poster in data-src)."""
        vids = []
        for li in data.items():
            vids.append({
                'vod_id': li('a').attr('href'),
                'vod_name': li('img').attr('alt'),
                'vod_pic': self.host + li('img').attr('data-src'),
                'vod_year': li('.bg').text(),
                'vod_remarks': li('p').text()
            })
        return vids
|
||||
|
||||
|
147
PY/绝对影视.py
Normal file
147
PY/绝对影视.py
Normal file
@ -0,0 +1,147 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import base64
|
||||
import re
|
||||
import sys
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Util.Padding import unpad
|
||||
from pyquery import PyQuery as pq
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
    """TVBox spider for jdys.art (绝对影视); the player url is AES-CBC
    encrypted inside an inline <script> on the play page."""

    def init(self, extend=""):
        pass

    def getName(self):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    host = 'https://www.jdys.art'

    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
        'sec-ch-ua-platform': '"macOS"',
        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
        'dnt': '1',
        'sec-ch-ua-mobile': '?0',
        'origin': host,
        'sec-fetch-site': 'cross-site',
        'sec-fetch-mode': 'cors',
        'sec-fetch-dest': 'empty',
        'referer': f'{host}/',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'priority': 'u=1, i',
    }

    def homeContent(self, filter):
        """Top-nav entries (first nine) become categories; the home wall seeds
        the initial list."""
        doc = self.getpq(self.fetch(self.host, headers=self.headers).text)
        cates = []
        for li in list(doc('.navtop .navlist li').items())[:9]:
            cates.append({
                'type_name': li('a').text(),
                'type_id': li('a').attr('href'),
            })
        return {'class': cates, 'list': self.getlist(doc('.mi_btcon .bt_img ul li'))}

    def homeVideoContent(self):
        pass

    def categoryContent(self, tid, pg, filter, extend):
        # tid is already a full category url; page 1 has no page/ suffix
        suffix = '' if pg == '1' else f'page/{pg}/'
        doc = self.getpq(self.fetch(f"{tid}{suffix}", headers=self.headers).text)
        return {
            'list': self.getlist(doc('.mi_cont .bt_img ul li')),
            'page': pg,
            'pagecount': 9999,
            'limit': 90,
            'total': 999999
        }

    def detailContent(self, ids):
        """Scrape the detail page; .moviedteail_list li order is fixed by the
        site template (type, area, year, ..., director at 5, actors at 7)."""
        doc = self.getpq(self.fetch(ids[0], headers=self.headers).text)
        meta = doc('.moviedteail_list li')
        vod = {
            'vod_name': doc('.dytext h1').text(),
            'type_name': meta.eq(0).text(),
            'vod_year': meta.eq(2).text(),
            'vod_area': meta.eq(1).text(),
            'vod_remarks': meta.eq(4).text(),
            'vod_actor': meta.eq(7).text(),
            'vod_director': meta.eq(5).text(),
            'vod_content': doc('.yp_context').text().strip()
        }
        eps = [f"{a.text()}${a.attr.href}" for a in doc('.paly_list_btn a').items()]
        vod["vod_play_from"] = "在线播放"
        vod["vod_play_url"] = "#".join(eps)
        return {"list": [vod]}

    def searchContent(self, key, quick, pg="1"):
        doc = self.getpq(self.fetch(f"{self.host}/page/{pg}/?s={key}", headers=self.headers).text)
        return {'list': self.getlist(doc('.mi_cont .bt_img ul li')), 'page': pg}

    def playerContent(self, flag, id, vipFlags):
        """Decrypt the embedded player config; on any failure hand the page
        url back for client-side parsing."""
        doc = self.getpq(self.fetch(id, headers=self.headers).text)
        try:
            script = doc('.videoplay script').eq(-1).text()
            payload = re.findall(r'var\s+[^=]*=\s*"([^"]*)";', script)
            key_iv = re.findall(r'parse\((.*?)\);', script)
            plain = self.aes(payload[0], key_iv[0].replace('"', ''), key_iv[1].replace('"', ''))
            url = re.search(r'url: "(.*?)"', plain).group(1)
            parse = 0
        except:  # NOTE(review): bare except kept from original behavior
            parse = 1
            url = id
        return {"parse": parse, "url": url, "header": self.headers}

    def localProxy(self, param):
        pass

    def getpq(self, text):
        """pyquery sometimes chokes on str input; retry with utf-8 bytes."""
        try:
            return pq(text)
        except Exception as e:
            print(f"{str(e)}")
            return pq(text.encode('utf-8'))

    def getlist(self, data):
        """Map listing <li> elements to vod dicts."""
        vids = []
        for li in data.items():
            vids.append({
                'vod_id': li('a').attr('href'),
                'vod_name': li('a img').attr('alt'),
                'vod_pic': li('a img').attr('src'),
                'vod_remarks': li('.dycategory').text(),
                'vod_year': li('.dyplayinfo').text() or li('.rating').text(),
            })
        return vids

    def aes(self, word, key, iv):
        """AES-128-CBC + PKCS7 decrypt of a base64 payload; returns utf-8 text."""
        cipher = AES.new(key.encode('utf-8'), AES.MODE_CBC, iv.encode('utf-8'))
        plain = unpad(cipher.decrypt(base64.b64decode(word)), AES.block_size)
        return plain.decode('utf-8')
|
93
PY/美帕APP.py
Normal file
93
PY/美帕APP.py
Normal file
@ -0,0 +1,93 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import sys
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
    """TVBox spider for the 美帕 ('mp') app api at g.c494.com."""

    def getName(self):
        return "mp"

    def init(self, extend=""):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    host = 'https://g.c494.com'

    header = {
        'User-Agent': 'Dart/2.10 (dart:io)',
        'platform_version': 'RP1A.200720.011',
        'version': '2.2.3',
        'copyright': 'xiaogui',
        'platform': 'android',
        'client_name': '576O5p+P5b2x6KeG',
    }

    def homeContent(self, filter):
        """Map the app nav into classes and turn each type_extend blob into
        TVBox filter definitions."""
        data = self.fetch(f'{self.host}/api.php/app/nav?token=', headers=self.header).json()
        labels = {"class": "类型", "area": "地区", "lang": "语言", "year": "年份", "letter": "字母", "by": "排序",
                  "sort": "排序"}
        filters = {}
        cates = []
        for item in data["list"]:
            extend_map = item["type_extend"]
            cates.append({"type_name": item["type_name"], "type_id": str(item["type_id"])})
            # only emit a filter group when at least one known key is populated
            if not any(k in extend_map and extend_map[k].strip() != "" for k in labels):
                continue
            filters[str(item["type_id"])] = []
            for fkey in extend_map:
                if fkey in labels and extend_map[fkey].strip() != "":
                    opts = [{"n": val.strip(), "v": val.strip()}
                            for val in extend_map[fkey].split(",") if val.strip() != ""]
                    filters[str(item["type_id"])].append({"key": fkey, "name": labels[fkey], "value": opts})
        return {"class": cates, "filters": filters}

    def homeVideoContent(self):
        data = self.fetch(f"{self.host}/api.php/app/index_video?token=", headers=self.header).json()
        vids = [v for section in data['list'] for v in section['vlist']]
        return {'list': vids}

    def categoryContent(self, tid, pg, filter, extend):
        query = {"pg": pg, "tid": tid, "class": extend.get("class", ""), "area": extend.get("area", ""),
                 "lang": extend.get("lang", ""), "year": extend.get("year", ""), "token": ""}
        return self.fetch(f'{self.host}/api.php/app/video', params=query, headers=self.header).json()

    def detailContent(self, ids):
        query = {"id": ids[0], "token": ""}
        data = self.fetch(f'{self.host}/api.php/app/video_detail', params=query, headers=self.header).json()
        vod = data['data']
        # strip advert/player fields the client has no use for
        for junk in ('pause_advert_list', 'init_advert_list', 'vod_url_with_player'):
            vod.pop(junk, None)
        return {"list": [vod]}

    def searchContent(self, key, quick, pg='1'):
        query = {'pg': pg, 'text': key, 'token': ''}
        return self.fetch(f'{self.host}/api.php/app/search', params=query, headers=self.header).json()

    def playerContent(self, flag, id, vipFlags):
        return {"parse": 0, "url": id, "header": {'User-Agent': 'User-Agent: Lavf/58.12.100'}}

    def localProxy(self, param):
        pass
|
215
PY/胖虎APP.py
Normal file
215
PY/胖虎APP.py
Normal file
@ -0,0 +1,215 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import re
|
||||
import sys
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
from Cryptodome.Cipher import AES
|
||||
from Cryptodome.Util.Padding import pad, unpad
|
||||
from base64 import b64encode, b64decode
|
||||
import json
|
||||
import time
|
||||
|
||||
|
||||
class Spider(Spider):
    """TVBox spider for the 胖虎 app api; request/response payloads are
    AES-128-CBC encrypted with a fixed key (iv == key).

    Fix vs. original: ``playerContent`` deleted ``headers['Content-type']``
    while the header was stored as ``'Content-Type'``, raising KeyError on
    every call; it now pops the correct key safely.
    """

    def getName(self):
        return "py_胖虎"

    def init(self, extend=""):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def destroy(self):
        pass

    def aes(self, operation, text):
        """Encrypt ('encrypt') text to base64, or decrypt ('decrypt') base64
        ciphertext, using AES-CBC with the fixed app key (iv == key)."""
        key = "ihIwTbt2YAe9TGea".encode('utf-8')
        iv = key
        if operation == 'encrypt':
            cipher = AES.new(key, AES.MODE_CBC, iv)
            ct_bytes = cipher.encrypt(pad(text.encode('utf-8'), AES.block_size))
            return b64encode(ct_bytes).decode('utf-8')
        elif operation == 'decrypt':
            cipher = AES.new(key, AES.MODE_CBC, iv)
            return unpad(cipher.decrypt(b64decode(text)), AES.block_size).decode('utf-8')

    host = "http://sm.physkan.top:3389"
    # NOTE(review): evaluated once at import time; the verify-time header does
    # not refresh per request — confirm the server tolerates a stale timestamp.
    t = str(int(time.time()))

    def homeContent(self, filter):
        """Build the signed header, fetch + decrypt the init payload, and
        derive classes/filters; banners are cached for homeVideoContent."""
        self.header = {
            'User-Agent': 'okhttp/3.14.9',
            'app-version-code': '402',
            'app-ui-mode': 'light',
            'app-user-device-id': '25f869d32598d3d3089a929453dff0bb7',
            'app-api-verify-time': self.t,
            'app-api-verify-sign': self.aes('encrypt', self.t),
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
        }
        raw = self.fetch("{0}/api.php/getappapi.index/initV119".format(self.host),
                         headers=self.header).content.decode('utf-8')
        init = json.loads(self.aes('decrypt', json.loads(raw)['data']))
        labels = {
            "class": "类型",
            "area": "地区",
            "lang": "语言",
            "year": "年份",
            "letter": "字母",
            "by": "排序",
            "sort": "排序"
        }
        filters = {}
        classes = []
        self.homedata = init['banner_list']  # reused by homeVideoContent
        for item in init['type_list']:
            if item['type_name'] == '全部':
                continue
            extend_map = json.loads(item['type_extend'])
            extend_map["sort"] = "最新,最热,最赞"
            classes.append({
                "type_name": item['type_name'],
                "type_id": item['type_id']
            })
            if not any(k in extend_map and extend_map[k].strip() != "" for k in labels):
                continue
            filters[str(item['type_id'])] = []
            for dkey in extend_map:
                if dkey in labels and extend_map[dkey].strip() != "":
                    opts = [
                        {"n": v.strip(), "v": v.strip()}
                        for v in extend_map[dkey].split(',') if v.strip() != ''
                    ]
                    filters[str(item['type_id'])].append({
                        "key": dkey,
                        "name": labels[dkey],
                        "value": opts
                    })
        return {'class': classes, 'filter': filters}

    def homeVideoContent(self):
        # relies on homeContent having run first (TVBox calls it before this)
        return {'list': self.homedata}

    def categoryContent(self, tid, pg, filter, extend):
        body = f"area={extend.get('area', '全部')}&year={extend.get('year', '全部')}&type_id={tid}&page={pg}&sort={extend.get('sort', '最新')}&lang={extend.get('lang', '全部')}&class={extend.get('class', '全部')}"
        url = '{0}/api.php/getappapi.index/typeFilterVodList'.format(self.host)
        raw = self.post(url, headers=self.header, data=body).content.decode('utf-8')
        plain = self.aes('decrypt', json.loads(raw)['data'])
        return {
            'list': json.loads(plain)['recommend_list'],
            'page': pg,
            'pagecount': 9999,
            'limit': 90,
            'total': 999999
        }

    def detailContent(self, ids):
        """Decrypt the detail payload and flatten play sources into the
        from/url pair; non-direct urls are re-encrypted for vodParse."""
        body = f"vod_id={ids[0]}"
        url = '{0}/api.php/getappapi.index/vodDetail'.format(self.host)
        raw = self.post(url, headers=self.header, data=body).content.decode('utf-8')
        detail = json.loads(self.aes('decrypt', json.loads(raw)['data']))
        vod = detail['vod']
        groups = []
        names = []
        for src in detail['vod_play_list']:
            eps = []
            names.append(src['player_info']['show'])
            parse = src['player_info']['parse']
            for ep in src['urls']:
                if re.search(r'mp4|m3u8', ep['url']):
                    eps.append(f"{ep['name']}${ep['url']}")
                elif re.search(r'www.yemu.xyz', ep['parse_api_url']):
                    eps.append(f"{ep['name']}${ep['parse_api_url']}")
                else:
                    eps.append(
                        f"{ep['name']}${'parse_api=' + parse + '&url=' + self.aes('encrypt', ep['url']) + '&token=' + ep['token']}")
            groups.append('#'.join(eps))
        vod['vod_play_from'] = '$$$'.join(names)
        vod['vod_play_url'] = '$$$'.join(groups)
        return {'list': [vod]}

    def searchContent(self, key, quick, pg='1'):
        body = f"keywords={key}&type_id=0&page={pg}"
        url = '{0}/api.php/getappapi.index/searchList'.format(self.host)
        raw = self.post(url, headers=self.header, data=body).content.decode('utf-8')
        plain = self.aes('decrypt', json.loads(raw)['data'])
        return {'list': json.loads(plain)['search_list']}

    def playerContent(self, flag, id, vipFlags):
        def encode_url_param(s):
            """urlencode only the url= segment of the parse body."""
            def replacer(match):
                from urllib.parse import quote_plus
                return match.group(1) + quote_plus(match.group(2)) + match.group(3)
            return re.sub(r'(url=)(.*?)(&token)', replacer, s)

        url = id
        parse = 0
        if 'm3u8' not in url and 'mp4' not in url:
            try:
                body = encode_url_param(url)
                data = self.post('{0}/api.php/getappapi.index/vodParse'.format(self.host),
                                 headers=self.header, data=body).content.decode('utf-8')
                data1 = json.loads(data)['data']
                data2 = json.loads(self.aes('decrypt', data1))['json']
                url = json.loads(data2)['url']
            except:
                url = id
                parse = 1
                if not id.startswith('https://www.yemu.xyz'):
                    url = 'https://www.yemu.xyz/?url={0}'.format(id)
        headers = self.header.copy()
        # FIX: original did del headers['Content-type'] (wrong case) -> KeyError
        headers.pop('Content-Type', None)
        return {"parse": parse, "url": url, "header": headers}

    def localProxy(self, param):
        pass
|
323
PY/腾.py
Normal file
323
PY/腾.py
Normal file
@ -0,0 +1,323 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import json
|
||||
import sys
|
||||
import uuid
|
||||
import copy
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
|
||||
self.dbody = {
|
||||
"page_params": {
|
||||
"channel_id": "",
|
||||
"filter_params": "sort=75",
|
||||
"page_type": "channel_operation",
|
||||
"page_id": "channel_list_second_page"
|
||||
}
|
||||
}
|
||||
self.body = self.dbody
|
||||
pass
|
||||
|
||||
def getName(self):
|
||||
pass
|
||||
|
||||
def isVideoFormat(self, url):
|
||||
pass
|
||||
|
||||
def manualVideoCheck(self):
|
||||
pass
|
||||
|
||||
def destroy(self):
|
||||
pass
|
||||
|
||||
host = 'https://v.qq.com'
|
||||
|
||||
apihost = 'https://pbaccess.video.qq.com'
|
||||
|
||||
headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.5410.0 Safari/537.36',
|
||||
'origin': host,
|
||||
'referer': f'{host}/'
|
||||
}
|
||||
|
||||
def homeContent(self, filter):
|
||||
cdata = {
|
||||
"电视剧": "100113",
|
||||
"电影": "100173",
|
||||
"综艺": "100109",
|
||||
"纪录片": "100105",
|
||||
"动漫": "100119",
|
||||
"少儿": "100150",
|
||||
"短剧": "110755"
|
||||
}
|
||||
result = {}
|
||||
classes = []
|
||||
filters = {}
|
||||
for k in cdata:
|
||||
classes.append({
|
||||
'type_name': k,
|
||||
'type_id': cdata[k]
|
||||
})
|
||||
with ThreadPoolExecutor(max_workers=len(classes)) as executor:
|
||||
futures = [executor.submit(self.get_filter_data, item['type_id']) for item in classes]
|
||||
for future in futures:
|
||||
cid, data = future.result()
|
||||
if not data.get('data', {}).get('module_list_datas'):
|
||||
continue
|
||||
filter_dict = {}
|
||||
try:
|
||||
items = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
|
||||
for item in items:
|
||||
if not item.get('item_params', {}).get('index_item_key'):
|
||||
continue
|
||||
params = item['item_params']
|
||||
filter_key = params['index_item_key']
|
||||
if filter_key not in filter_dict:
|
||||
filter_dict[filter_key] = {
|
||||
'key': filter_key,
|
||||
'name': params['index_name'],
|
||||
'value': []
|
||||
}
|
||||
filter_dict[filter_key]['value'].append({
|
||||
'n': params['option_name'],
|
||||
'v': params['option_value']
|
||||
})
|
||||
except (IndexError, KeyError):
|
||||
continue
|
||||
filters[cid] = list(filter_dict.values())
|
||||
result['class'] = classes
|
||||
result['filters'] = filters
|
||||
return result
|
||||
|
||||
def homeVideoContent(self):
|
||||
json_data = {'page_context':None,'page_params':{'page_id':'100101','page_type':'channel','skip_privacy_types':'0','support_click_scan':'1','new_mark_label_enabled':'1','ams_cookies':'',},'page_bypass_params':{'params':{'caller_id':'','data_mode':'default','page_id':'','page_type':'channel','platform_id':'2','user_mode':'default',},'scene':'channel','abtest_bypass_id':'',}}
|
||||
data = self.post(f'{self.apihost}/trpc.vector_layout.page_view.PageService/getPage',headers=self.headers, json=json_data).json()
|
||||
vlist = []
|
||||
for it in data['data']['CardList'][0]['children_list']['list']['cards']:
|
||||
if it.get('params'):
|
||||
p = it['params']
|
||||
tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
|
||||
id = it.get('id') or p.get('cid')
|
||||
name = p.get('mz_title') or p.get('title')
|
||||
if name and 'http' not in id:
|
||||
vlist.append({
|
||||
'vod_id': id,
|
||||
'vod_name': name,
|
||||
'vod_pic': p.get('image_url'),
|
||||
'vod_year': tag.get('tag_2', {}).get('text'),
|
||||
'vod_remarks': tag.get('tag_4', {}).get('text')
|
||||
})
|
||||
return {'list': vlist}
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
|
||||
result = {}
|
||||
params = {
|
||||
"sort": extend.get('sort', '75'),
|
||||
"attr": extend.get('attr', '-1'),
|
||||
"itype": extend.get('itype', '-1'),
|
||||
"ipay": extend.get('ipay', '-1'),
|
||||
"iarea": extend.get('iarea', '-1'),
|
||||
"iyear": extend.get('iyear', '-1'),
|
||||
"theater": extend.get('theater', '-1'),
|
||||
"award": extend.get('award', '-1'),
|
||||
"recommend": extend.get('recommend', '-1')
|
||||
}
|
||||
if pg == '1':
|
||||
self.body = self.dbody.copy()
|
||||
self.body['page_params']['channel_id'] = tid
|
||||
self.body['page_params']['filter_params'] = self.josn_to_params(params)
|
||||
data = self.post(
|
||||
f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
|
||||
json=self.body, headers=self.headers).json()
|
||||
ndata = data['data']
|
||||
if ndata['has_next_page']:
|
||||
result['pagecount'] = 9999
|
||||
self.body['page_context'] = ndata['next_page_context']
|
||||
else:
|
||||
result['pagecount'] = int(pg)
|
||||
vlist = []
|
||||
for its in ndata['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']:
|
||||
id = its.get('item_params', {}).get('cid')
|
||||
if id:
|
||||
p = its['item_params']
|
||||
tag = json.loads(p.get('uni_imgtag', '{}') or p.get('imgtag', '{}') or '{}')
|
||||
name = p.get('mz_title') or p.get('title')
|
||||
pic = p.get('new_pic_hz') or p.get('new_pic_vt')
|
||||
vlist.append({
|
||||
'vod_id': id,
|
||||
'vod_name': name,
|
||||
'vod_pic': pic,
|
||||
'vod_year': tag.get('tag_2', {}).get('text'),
|
||||
'vod_remarks': tag.get('tag_4', {}).get('text')
|
||||
})
|
||||
result['list'] = vlist
|
||||
result['page'] = pg
|
||||
result['limit'] = 90
|
||||
result['total'] = 999999
|
||||
return result
|
||||
|
||||
def detailContent(self, ids):
|
||||
vbody = {"page_params":{"req_from":"web","cid":ids[0],"vid":"","lid":"","page_type":"detail_operation","page_id":"detail_page_introduction"},"has_cache":1}
|
||||
body = {"page_params":{"req_from":"web_vsite","page_id":"vsite_episode_list","page_type":"detail_operation","id_type":"1","page_size":"","cid":ids[0],"vid":"","lid":"","page_num":"","page_context":"","detail_page_type":"1"},"has_cache":1}
|
||||
with ThreadPoolExecutor(max_workers=2) as executor:
|
||||
future_detail = executor.submit(self.get_vdata, vbody)
|
||||
future_episodes = executor.submit(self.get_vdata, body)
|
||||
vdata = future_detail.result()
|
||||
data = future_episodes.result()
|
||||
|
||||
pdata = self.process_tabs(data, body, ids)
|
||||
if not pdata:
|
||||
return self.handle_exception(None, "No pdata available")
|
||||
|
||||
try:
|
||||
star_list = vdata['data']['module_list_datas'][0]['module_datas'][0]['item_data_lists']['item_datas'][
|
||||
0].get('sub_items', {}).get('star_list', {}).get('item_datas', [])
|
||||
actors = [star['item_params']['name'] for star in star_list]
|
||||
names = ['腾讯视频', '预告片']
|
||||
plist, ylist = self.process_pdata(pdata, ids)
|
||||
if not plist:
|
||||
del names[0]
|
||||
if not ylist:
|
||||
del names[1]
|
||||
vod = self.build_vod(vdata, actors, plist, ylist, names)
|
||||
return {'list': [vod]}
|
||||
except Exception as e:
|
||||
return self.handle_exception(e, "Error processing detail")
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
|
||||
headers = self.headers.copy()
|
||||
headers.update({'Content-Type': 'application/json'})
|
||||
body = {'version':'25021101','clientType':1,'filterValue':'','uuid':str(uuid.uuid4()),'retry':0,'query':key,'pagenum':int(pg)-1,'pagesize':30,'queryFrom':0,'searchDatakey':'','transInfo':'','isneedQc':True,'preQid':'','adClientInfo':'','extraInfo':{'isNewMarkLabel':'1','multi_terminal_pc':'1','themeType':'1',},}
|
||||
data = self.post(f'{self.apihost}/trpc.videosearch.mobile_search.MultiTerminalSearch/MbSearch?vplatform=2',
|
||||
json=body, headers=headers).json()
|
||||
vlist = []
|
||||
vname=["电视剧", "电影", "综艺", "纪录片", "动漫", "少儿", "短剧"]
|
||||
v=data['data']['normalList']['itemList']
|
||||
d=data['data']['areaBoxList'][0]['itemList']
|
||||
q=v+d
|
||||
if v[0].get('doc') and v[0]['doc'].get('id') =='MainNeed':q=d+v
|
||||
for k in q:
|
||||
if k.get('doc') and k.get('videoInfo') and k['doc'].get('id') and '外站' not in k['videoInfo'].get('subTitle') and k['videoInfo'].get('title') and k['videoInfo'].get('typeName') in vname:
|
||||
img_tag = k.get('videoInfo', {}).get('imgTag')
|
||||
if img_tag is not None and isinstance(img_tag, str):
|
||||
try:
|
||||
tag = json.loads(img_tag)
|
||||
except json.JSONDecodeError as e:
|
||||
tag = {}
|
||||
else:
|
||||
tag = {}
|
||||
pic = k.get('videoInfo', {}).get('imgUrl')
|
||||
vlist.append({
|
||||
'vod_id': k['doc']['id'],
|
||||
'vod_name': self.removeHtmlTags(k['videoInfo']['title']),
|
||||
'vod_pic': pic,
|
||||
'vod_year': k['videoInfo'].get('typeName') +' '+ tag.get('tag_2', {}).get('text', ''),
|
||||
'vod_remarks': tag.get('tag_4', {}).get('text', '')
|
||||
})
|
||||
return {'list': vlist, 'page': pg}
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
|
||||
ids = id.split('@')
|
||||
url = f"{self.host}/x/cover/{ids[0]}/{ids[1]}.html"
|
||||
return {'jx':1,'parse': 1, 'url': url, 'header': ''}
|
||||
|
||||
def localProxy(self, param):
|
||||
pass
|
||||
|
||||
def get_filter_data(self, cid):
|
||||
hbody = self.dbody.copy()
|
||||
hbody['page_params']['channel_id'] = cid
|
||||
data = self.post(
|
||||
f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.PageServer/GetPageData?video_appid=1000005&vplatform=2&vversion_name=8.9.10&new_mark_label_enabled=1',
|
||||
json=hbody, headers=self.headers).json()
|
||||
return cid, data
|
||||
|
||||
def get_vdata(self, body):
    """POST an episode-page request.

    On any failure return an empty module_list_datas shell so callers
    can iterate the result without guarding.
    """
    endpoint = (f'{self.apihost}/trpc.universal_backend_service.page_server_rpc.'
                f'PageServer/GetPageData?video_appid=3000010&vplatform=2&vversion_name=8.2.96')
    try:
        return self.post(endpoint, json=body, headers=self.headers).json()
    except Exception as e:
        print(f"Error in get_vdata: {str(e)}")
        return {'data': {'module_list_datas': []}}
|
||||
|
||||
def process_pdata(self, pdata, ids):
    """Split raw episode items into regular episodes and trailers.

    Returns (episodes, trailers); each entry is "title$<cover>@<vid>".
    Items without an item_id are skipped.
    """
    episodes, trailers = [], []
    for item in pdata:
        if not item.get('item_id'):
            continue
        title = item['item_params']['union_title']
        entry = f"{title}${ids[0]}@{item['item_id']}"
        (trailers if '预告' in title else episodes).append(entry)
    return episodes, trailers
|
||||
|
||||
def build_vod(self, vdata, actors, plist, ylist, names):
    """Assemble a vod dict from detail-page data plus pre-built play lists.

    Episode and trailer lists are joined with '#'; multiple play
    groups/sources are joined with '$$$'.
    """
    params = (vdata['data']['module_list_datas'][0]['module_datas'][0]
              ['item_data_lists']['item_datas'][0]['item_params'])
    groups = ['#'.join(group) for group in (plist, ylist) if group]
    return {
        'type_name': params.get('sub_genre', ''),
        'vod_name': params.get('title', ''),
        'vod_year': params.get('year', ''),
        'vod_area': params.get('area_name', ''),
        'vod_remarks': params.get('holly_online_time', '') or params.get('hotval', ''),
        'vod_actor': ','.join(actors),
        'vod_content': params.get('cover_description', ''),
        'vod_play_from': '$$$'.join(names),
        'vod_play_url': '$$$'.join(groups),
    }
|
||||
|
||||
def handle_exception(self, e, message):
    """Log `message` with the exception and return a placeholder play result."""
    print(f"{message}: {str(e)}")
    fallback = {'vod_play_from': '哎呀翻车啦', 'vod_play_url': '翻车啦#555'}
    return {'list': [fallback]}
|
||||
|
||||
def process_tabs(self, data, body, ids):
    """Collect episode items across every tab of the episode list.

    `data` is the first page's response (its items are already in
    `pdata`); remaining tabs are fetched concurrently via get_vdata and
    appended in their original tab order. Returns [] on any failure.
    """
    try:
        pdata = data['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists']['item_datas']
        tabs = data['data']['module_list_datas'][-1]['module_datas'][-1]['module_params'].get('tabs')
        if tabs and len(json.loads(tabs)):
            tabs = json.loads(tabs)
            # first tab's items are already present; queue the rest
            remaining_tabs = tabs[1:]
            task_queue = []
            for tab in remaining_tabs:
                # deep copy: each request gets its own page_context
                nbody = copy.deepcopy(body)
                nbody['page_params']['page_context'] = tab['page_context']
                task_queue.append(nbody)
            with ThreadPoolExecutor(max_workers=10) as executor:
                future_map = {executor.submit(self.get_vdata, task): idx for idx, task in enumerate(task_queue)}
                # index results by submission order so episode order is stable
                results = [None] * len(task_queue)
                for future in as_completed(future_map.keys()):
                    idx = future_map[future]
                    results[idx] = future.result()
            for result in results:
                if result:
                    page_data = result['data']['module_list_datas'][-1]['module_datas'][-1]['item_data_lists'][
                        'item_datas']
                    pdata.extend(page_data)
        return pdata
    except Exception as e:
        print(f"Error processing episodes: {str(e)}")
        return []
|
||||
|
||||
def josn_to_params(self, params, skip_empty=False):
    """Serialize a dict into an unescaped query string.

    With skip_empty=True, falsy values are dropped entirely.
    (Method name keeps the historical spelling — callers use it.)
    """
    pairs = (f"{key}={value}" for key, value in params.items()
             if not (skip_empty and not value))
    return "&".join(pairs)
|
||||
|
||||
|
205
PY/芒.py
Normal file
205
PY/芒.py
Normal file
@ -0,0 +1,205 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import sys
|
||||
import time
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
    # No per-instance setup needed; endpoints are class attributes.
    pass

def getName(self):
    pass

def isVideoFormat(self, url):
    pass

def manualVideoCheck(self):
    pass

def destroy(self):
    pass

# Mango TV endpoints: web portal, listing API, PC-web API,
# recommendation API and mobile search API respectively.
rhost='https://www.mgtv.com'

host='https://pianku.api.mgtv.com'

vhost='https://pcweb.api.mgtv.com'

mhost='https://dc.bz.mgtv.com'

shost='https://mobileso.bz.mgtv.com'

# Desktop Chrome UA plus origin/referer so the APIs accept the requests.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
    'origin': rhost,
    'referer': f'{rhost}/'
}
|
||||
|
||||
def homeContent(self, filter):
    """Home page: fixed channel list plus per-channel filters fetched
    concurrently via getf()."""
    channel_ids = {
        "电影": "3",
        "电视剧": "2",
        "综艺": "1",
        "动画": "50",
        "少儿": "10",
        "纪录片": "51",
        "教育": "115"
    }
    classes = [{'type_name': name, 'type_id': cid} for name, cid in channel_ids.items()]
    filters = {}
    with ThreadPoolExecutor(max_workers=len(classes)) as pool:
        for cid, ft in pool.map(self.getf, classes):
            # only channels that actually expose filter groups
            if ft:
                filters[cid] = ft
    return {'class': classes, 'filters': filters}
|
||||
|
||||
def homeVideoContent(self):
    """Pull the recommendation feed and flatten every DSL card into vods."""
    url = (f'{self.mhost}/dynamic/v1/channel/index/0/0/0/1000000/0/0/17/1354'
           f'?type=17&version=5.0&t={str(int(time.time()*1000))}&_support=10000000')
    data = self.fetch(url, headers=self.headers).json()
    videoList = []
    for channel in data['data']:
        for card in (channel.get('DSLList') or []):
            items = (card.get('data') or {}).get('items') or []
            for item in items:
                videoList.append({
                    'vod_id': item["videoId"],
                    'vod_name': item['videoName'],
                    'vod_pic': item['img'],
                    'vod_year': item.get('cornerTitle'),
                    'vod_remarks': item.get('time') or item.get('desc'),
                })
    return {'list': videoList}
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
    """Channel listing with user-selected filters merged into the query."""
    query = {
        'allowedRC': '1',
        'platform': 'pcweb',
        'channelId': tid,
        'pn': pg,
        'pc': '80',
        'hudong': '1',
        '_support': '10000000'
    }
    query.update(extend)
    data = self.fetch(f'{self.host}/rider/list/pcweb/v3', params=query, headers=self.headers).json()
    videoList = [{
        'vod_id': doc["playPartId"],
        'vod_name': doc['title'],
        'vod_pic': doc['img'],
        'vod_year': (doc.get('rightCorner', {}) or {}).get('text') or doc.get('year'),
        'vod_remarks': doc['updateInfo'],
    } for doc in data['data']['hitDocs']]
    # real pagecount/total are unknown; report large sentinels so the UI keeps paging
    return {'list': videoList, 'page': pg, 'pagecount': 9999, 'limit': 90, 'total': 999999}
|
||||
|
||||
def detailContent(self, ids):
    """Video detail: fetch metadata, then all episode pages (page 1
    synchronously, the rest concurrently, merged back in page order)."""
    vbody = {'allowedRC': '1', 'vid': ids[0], 'type': 'b', '_support': '10000000'}
    vdata = self.fetch(f'{self.vhost}/video/info', params=vbody, headers=self.headers).json()
    d = vdata['data']['info']['detail']
    vod = {
        'vod_name': vdata['data']['info']['title'],
        'type_name': d.get('kind'),
        'vod_year': d.get('releaseTime'),
        'vod_area': d.get('area'),
        'vod_lang': d.get('language'),
        'vod_remarks': d.get('updateInfo'),
        'vod_actor': d.get('leader'),
        'vod_director': d.get('director'),
        'vod_content': d.get('story'),
        'vod_play_from': '芒果TV',
        'vod_play_url': ''
    }
    data, pdata = self.fetch_page_data('1', ids[0], True)
    # BUGFIX: total_page may arrive as a string. The old code only cast it
    # inside the comparison (int(pagecount) > 1) and then crashed on
    # range(2, pagecount + 1) with a str. Cast once, up front.
    pagecount = int(data['data'].get('total_page') or 1)
    if pagecount > 1:
        pages = list(range(2, pagecount + 1))
        page_results = {}
        with ThreadPoolExecutor(max_workers=10) as executor:
            future_to_page = {
                executor.submit(self.fetch_page_data, page, ids[0]): page
                for page in pages
            }
            for future in as_completed(future_to_page):
                page = future_to_page[future]
                try:
                    page_results[page] = future.result()
                except Exception as e:
                    print(f"Error fetching page {page}: {e}")
        # append in ascending page order regardless of completion order
        for page in sorted(page_results.keys()):
            pdata.extend(page_results[page])
    vod['vod_play_url'] = '#'.join(pdata)
    return {'list': [vod]}
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
    """Mobile search endpoint; keep only hits that carry a vid and image."""
    data = self.fetch(
        f'{self.shost}/applet/search/v1?channelCode=mobile-wxap&q={key}&pn={pg}&pc=10&_support=10000000',
        headers=self.headers).json()
    videoList = []
    for hit in data['data']['contents']:
        if not (hit.get('data') and len(hit['data'])):
            continue
        first = hit['data'][0]
        if not (first.get('vid') and first.get('img')):
            continue
        try:
            videoList.append({
                'vod_id': first['vid'],
                'vod_name': first['title'],
                'vod_pic': first['img'],
                'vod_year': (hit.get('rightTopCorner', {}) or {}).get('text') or hit.get('year'),
                'vod_remarks': '/'.join(hit.get('desc', [])),
            })
        except:
            print(first)
    return {'list': videoList, 'page': pg}
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
    """Hand the episode's web URL to an external parser (jx)."""
    page_url = f'{self.rhost}{id}'
    return {'jx': 1, 'parse': 1, 'url': page_url, 'header': ''}
|
||||
|
||||
def localProxy(self, param):
    """Local proxy hook (unused by this spider)."""
    pass
|
||||
|
||||
def getf(self, body):
    """Fetch the filter definition for one channel.

    Returns (type_id, filter_list) so concurrent callers can match
    results back to their channel.
    """
    params = {
        'allowedRC': '1',
        'channelId': body['type_id'],
        'platform': 'pcweb',
        '_support': '10000000',
    }
    data = self.fetch(f'{self.host}/rider/config/channel/v1', params=params, headers=self.headers).json()
    ft = []
    for group in data['data']['listItems']:
        try:
            options = [{"n": tag['tagName'], "v": tag['tagId']}
                       for tag in group['items'] if tag.get('tagName')]
            ft.append({"key": group['eName'], "name": group['typeName'], "value": options})
        except:
            print(group)
    return body['type_id'], ft
|
||||
|
||||
def fetch_page_data(self, page, id, b=False):
    """Fetch one page of the episode list as 'title$url' strings.

    With b=True also return the raw response (callers read total_page).
    """
    body = {'version': '5.5.35', 'video_id': id, 'page': page, 'size': '30',
            'platform': '4', 'src': 'mgtv', 'allowedRC': '1', '_support': '10000000'}
    data = self.fetch(f'{self.vhost}/episode/list', params=body, headers=self.headers).json()
    ldata = [f'{ep["t3"]}${ep["url"]}' for ep in data['data']['list']]
    return (data, ldata) if b else ldata
|
239
PY/视觉APP.py
Normal file
239
PY/视觉APP.py
Normal file
@ -0,0 +1,239 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import sys
|
||||
sys.path.append("..")
|
||||
import re
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Util.Padding import pad, unpad
|
||||
from base64 import b64encode, b64decode
|
||||
import json
|
||||
from base.spider import Spider
|
||||
from urllib.parse import quote
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def getName(self):
    """Display name of this spider."""
    return "视觉"
|
||||
|
||||
def init(self, extend=""):
    # NOTE: self.host() is the method defined below; after this call the
    # attribute shadows the method with the resolved base-URL string.
    self.host = self.host()
    pass
|
||||
|
||||
# Framework hooks not used by this spider.
def isVideoFormat(self, url):
    pass

def manualVideoCheck(self):
    pass

def action(self, action):
    pass

def destroy(self):
    pass
|
||||
|
||||
def homeContent(self, filter):
    """Category list; each category's converUrl JSON encodes which
    filter dimensions (class/area/lang/...) it supports."""
    data = self.fetch(
        f"{self.host}/api/v3/drama/getCategory?orderBy=type_id",
        headers=self.headers,
    ).json()
    # API field -> UI label for the filter rows
    dy = {
        "class": "类型",
        "area": "地区",
        "lang": "语言",
        "year": "年份",
        "letter": "字母",
        "by": "排序",
        "sort": "排序",
    }
    filters = {}
    classes = []
    for item in data["data"]:
        has_non_empty_field = False
        jsontype_extend = json.loads(item["converUrl"])
        classes.append({"type_name": item["name"], "type_id": str(item["id"])})
        # only emit a filter group when at least one known dimension has values
        for key in dy:
            if key in jsontype_extend and jsontype_extend[key].strip() != "":
                has_non_empty_field = True
                break
        if has_non_empty_field:
            filters[str(item["id"])] = []
            for dkey in jsontype_extend:
                if dkey in dy and jsontype_extend[dkey].strip() != "":
                    # dimension values come as a comma-separated string
                    values = jsontype_extend[dkey].split(",")
                    value_array = [
                        {"n": value.strip(), "v": value.strip()}
                        for value in values
                        if value.strip() != ""
                    ]
                    filters[str(item["id"])].append(
                        {"key": dkey, "name": dy[dkey], "value": value_array}
                    )
    result = {}
    result["class"] = classes
    result["filters"] = filters
    return result
|
||||
|
||||
def homeVideoContent(self):
    """Home carousel: the encrypted tag list yields banner items whose
    link query string carries the drama id."""
    data = self.fetch(f"{self.host}/api/ex/v3/security/tag/list", headers=self.headers).json()["data"]
    decrypted = self.aes(self.aes(data, self.key[0]), self.key[1], 'decrypt', True)
    vods = []
    for banner in decrypted[0]['carousels']:
        vods.append({
            "vod_id": banner['link'].split("id=")[1],
            'vod_name': banner.get("title"),
            'vod_pic': banner.get("cover"),
            'vod_remarks': banner.get("sort"),
        })
    return {"list": vods}
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
    """Encrypted listing query for one category.

    BUGFIX: the class filter was read via extend.get('classs') (typo),
    so the user's 类型 selection was silently dropped — and the body
    then indexed extend['class'], which would have raised KeyError had
    the misspelled key ever been present.
    """
    params = []
    if extend.get('area'):
        params.append(f"vodArea={extend['area']}")
    if extend.get('class'):
        params.append(f"vodClass={extend['class']}")
    params.append("pagesize=20")
    params.append(f"typeId1={tid}")
    params.append(f"page={pg}")
    if extend.get('year'):
        params.append(f"vodYear={extend['year']}")
    body = '&'.join(params)
    # double-encrypt the query (inner key then outer), double-decrypt the response
    path = self.aes(self.aes(body, self.key[1], 'encrypt'), self.key[0], 'encrypt', True)
    data = self.fetch(f"{self.host}/api/ex/v3/security/drama/list?query={path}", headers=self.headers).json()[
        "data"]
    data = self.aes(self.aes(data, self.key[0]), self.key[1], 'decrypt', True)['list']
    list = []
    for item in data:
        list.append({
            'vod_id': item.get("id"),
            'vod_pic': item["coverImage"].get("path"),
            'vod_name': item.get("name"),
            'vod_year': item.get("year"),
            'vod_remarks': item.get("remark")
        })
    result = {}
    result["list"] = list
    result["page"] = pg
    result["pagecount"] = 9999
    result["limit"] = 90
    result["total"] = 999999
    return result
|
||||
|
||||
def detailContent(self, ids):
    """Drama detail: metadata plus play lists grouped by source.

    Episode paths that already look like media files (.mp4/.m3u8/.flv)
    pass through directly; anything else is wrapped in a
    vodPlayFrom/playUrl query that playerContent resolves later.
    """
    url = f"{self.host}/api/v3/drama/getDetail?id={ids[0]}"
    data = self.post(url, headers=self.headers).json()["data"]
    vod = {
        'vod_name': data.get("name"),
        'vod_area': data.get("area"),
        'type_name': data.get("clazz"),
        'vod_actor': data.get("actor"),
        'vod_director': data.get("director"),
        'vod_content': data.get("brief").strip(),
    }
    play = []
    names = []
    plays = {}
    for itt in data["videos"]:
        # one bucket per source; names keeps first-seen display order
        if itt["sourceCn"] not in names:
            plays[itt["source"]] = []
            names.append(itt["sourceCn"])
        url = f"vodPlayFrom={itt['source']}&playUrl={itt['path']}"
        if re.search(r"\.(mp4|m3u8|flv)$", itt["path"]):
            url = itt["path"]
        plays[itt["source"]].append(f"{itt['titleOld']}${url}")
    for it in plays:
        play.append("#".join(plays[it]))
    vod["vod_play_from"] = "$$$".join(names)
    vod["vod_play_url"] = "$$$".join(play)
    result = {"list": [vod]}
    return result
|
||||
|
||||
def searchContent(self, key, quick, pg=1):
    """Keyword search through the same encrypted envelope as the listing API."""
    body = f"pagesize=20&page={pg}&searchKeys={key}"
    path = self.aes(self.aes(body, self.key[1], 'encrypt'), self.key[0], 'encrypt', True)
    raw = self.fetch(f"{self.host}/api/ex/v3/security/drama/list?query={path}", headers=self.headers).json()["data"]
    items = self.aes(self.aes(raw, self.key[0]), self.key[1], 'decrypt', True)['list']
    vods = [{
        'vod_id': item.get("id"),
        'vod_pic': item["coverImage"].get("path"),
        'vod_name': item.get("name"),
        'vod_year': item.get("year"),
        'vod_remarks': item.get("remark")
    } for item in items]
    return {"list": vods, "page": pg}
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
    """Resolve a play id to a direct URL.

    Wrapped ids (vodPlayFrom=...&playUrl=...) go through the encrypted
    videoUsableUrl endpoint; URLs that look like images are m3u8
    playlists in disguise and are routed through localProxy.
    """
    url = id
    if "vodPlayFrom" in url:
        try:
            path = self.aes(self.aes(id, self.key[1], 'encrypt'), self.key[0], 'encrypt', True)
            data = \
                self.fetch(f"{self.host}/api/ex/v3/security/videoUsableUrl?query={path}", headers=self.headers).json()[
                    "data"]
            url = self.aes(self.aes(data, self.key[0]), self.key[1], 'decrypt', True)['playUrl']
            # try:
            #     url1 = self.fetch(url, headers=self.headers, timeout=5, allow_redirects=False).headers['Location']
            #     if "http" in url1 and url1:
            #         url = url1
            # except:
            #     pass
        except Exception as e:
            # resolution failed: fall through with the wrapped id unchanged
            pass
    if '.jpg' in url or '.jpeg' in url or '.png' in url:
        # disguised playlist: serve through the local proxy which rewrites it
        url = self.getProxyUrl() + "&url=" + b64encode(url.encode('utf-8')).decode('utf-8') + "&type=m3u8"
    result = {}
    result["parse"] = 0
    result["url"] = url
    result["header"] = {'User-Agent': 'okhttp/3.12.1'}
    return result
|
||||
|
||||
def localProxy(self, param):
    """Rewrite a remote m3u8 so relative segment URIs become absolute.

    `param['url']` is the base64-encoded playlist URL produced by
    playerContent. Returns [status, mime, body].
    """
    url = b64decode(param["url"]).decode('utf-8')
    # playlist directory, used to absolutize relative segment lines
    durl = url[:url.rfind('/')]
    data = self.fetch(url, headers=self.headers).content.decode("utf-8")
    lines = data.strip().split('\n')
    for index, string in enumerate(lines):
        if '#EXT' not in string and 'http' not in string:
            lines[index] = durl + ('' if string.startswith('/') else '/') + string
    data = '\n'.join(lines)
    # BUGFIX: MIME type was misspelled "application/vnd.apple.mpegur",
    # which strict players reject.
    return [200, "application/vnd.apple.mpegurl", data]
|
||||
|
||||
def host(self):
    """Resolve the current API base URL from the published token file;
    fall back to a known address when unreachable."""
    try:
        url = self.fetch('https://www.shijue.pro/token.txt', headers=self.headers).json()['domain']
        return url
    except:
        return "http://118.25.18.217:6632"

headers = {
    'User-Agent': 'okhttp/3.12.1',
    'Content-Type': 'application/json;'
}
# outer / inner AES-ECB keys for the "security" API envelope
key = ['TFLYWVJ5EG5YB1PLZLVVMGVLBGRIDCSW', 'nj6E5K4yYYT5W4ScJ3J3rJ2zrzcJkpTk']
|
||||
|
||||
def aes(self, word, key, mode='decrypt', bool=False):
    """AES-ECB helper for the security API.

    decrypt: base64 -> AES -> unpad (-> json.loads when bool=True).
    encrypt: pad -> AES -> base64 (-> URL-quote when bool=True).
    Any other mode returns `word` unchanged.
    """
    raw_key = key.encode('utf-8')
    if mode == 'decrypt':
        cipher = AES.new(raw_key, AES.MODE_ECB)
        plain = unpad(cipher.decrypt(b64decode(word)), AES.block_size).decode('utf-8')
        return json.loads(plain) if bool else plain
    if mode == 'encrypt':
        cipher = AES.new(raw_key, AES.MODE_ECB)
        token = b64encode(cipher.encrypt(pad(word.encode('utf-8'), AES.block_size))).decode('utf-8')
        return quote(token) if bool else token
    return word
|
||||
|
||||
|
210
PY/金牌.py
Normal file
210
PY/金牌.py
Normal file
@ -0,0 +1,210 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import json
|
||||
import sys
|
||||
import threading
|
||||
import uuid
|
||||
import requests
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
import time
|
||||
from Crypto.Hash import MD5, SHA1
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
    """`extend` is a JSON config whose 'site' field lists candidate
    mirrors; pick the fastest one by HEAD-request latency."""
    if extend:
        hosts=json.loads(extend)['site']
        self.host = self.host_late(hosts)
    pass
|
||||
|
||||
# Framework hooks not used by this spider.
def getName(self):
    pass

def isVideoFormat(self, url):
    pass

def manualVideoCheck(self):
    pass

def destroy(self):
    pass
|
||||
|
||||
def homeContent(self, filter):
    """Build the category list and per-channel filter UI from the
    filter-type and filter-list endpoints."""
    cdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/get/filer/type", headers=self.getheaders()).json()
    fdata = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/get/filer/list", headers=self.getheaders()).json()
    result = {}
    classes = []
    filters={}
    for k in cdata['data']:
        classes.append({
            'type_name': k['typeName'],
            'type_id': str(k['typeId']),
        })
    sort_values = [{"n": "最近更新", "v": "2"},{"n": "人气高低", "v": "3"}, {"n": "评分高低", "v": "4"}]
    for tid, d in fdata['data'].items():
        # copy: channel '1' gets the first sort option removed below
        current_sort_values = sort_values.copy()
        if tid == '1':
            del current_sort_values[0]
        filters[tid] = [
            {"key": "type", "name": "类型",
             "value": [{"n": i["itemText"], "v": i["itemValue"]} for i in d["typeList"]]},

            # plot filter only when the channel provides one
            *([] if not d["plotList"] else [{"key": "v_class", "name": "剧情",
                                             "value": [{"n": i["itemText"], "v": i["itemText"]}
                                                       for i in d["plotList"]]}]),

            {"key": "area", "name": "地区",
             "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["districtList"]]},

            {"key": "year", "name": "年份",
             "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["yearList"]]},

            {"key": "lang", "name": "语言",
             "value": [{"n": i["itemText"], "v": i["itemText"]} for i in d["languageList"]]},

            {"key": "sort", "name": "排序", "value": current_sort_values}
        ]
    result['class'] = classes
    result['filters'] = filters
    return result
|
||||
|
||||
def homeVideoContent(self):
    """Merge the home 'all list' sections with the hot-search strip."""
    data1 = self.fetch(f"{self.host}/api/mw-movie/anonymous/v1/home/all/list", headers=self.getheaders()).json()
    data2 = self.fetch(f"{self.host}/api/mw-movie/anonymous/home/hotSearch", headers=self.getheaders()).json()
    merged = []
    for section in data1['data'].values():
        merged.extend(section['list'])
    merged.extend(data2['data'])
    return {'list': self.getvod(merged)}
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
    """Signed listing request; user filters arrive through `extend`."""
    params = {
        "area": extend.get('area', ''),
        "filterStatus": "1",
        "lang": extend.get('lang', ''),
        "pageNum": pg,
        "pageSize": "30",
        "sort": extend.get('sort', '1'),
        "sortBy": "1",
        "type": extend.get('type', ''),
        "type1": tid,
        "v_class": extend.get('v_class', ''),
        "year": extend.get('year', '')
    }
    data = self.fetch(f"{self.host}/api/mw-movie/anonymous/video/list?{self.js(params)}", headers=self.getheaders(params)).json()
    # real pagecount/total are unknown; large sentinels keep the UI paging
    return {
        'list': self.getvod(data['data']['list']),
        'page': pg,
        'pagecount': 9999,
        'limit': 90,
        'total': 999999,
    }
|
||||
|
||||
def detailContent(self, ids):
    """Detail + episode list; each play id is '<vid>@@<nid>'."""
    data=self.fetch(f"{self.host}/api/mw-movie/anonymous/video/detail?id={ids[0]}",headers=self.getheaders({'id':ids[0]})).json()
    vod=self.getvod([data['data']])[0]
    vod['vod_play_from']='嗷呜有金牌'
    # single-episode titles fall back to the vod name
    vod['vod_play_url'] = '#'.join(
        f"{i['name'] if len(vod['episodelist']) > 1 else vod['vod_name']}${ids[0]}@@{i['nid']}" for i in
        vod['episodelist'])
    # internal field; not part of the vod contract
    vod.pop('episodelist', None)
    return {'list':[vod]}
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
    """Signed keyword search."""
    params = {
        "keyword": key,
        "pageNum": pg,
        "pageSize": "8",
        "sourceCode": "1"
    }
    data = self.fetch(f"{self.host}/api/mw-movie/anonymous/video/searchByWord?{self.js(params)}", headers=self.getheaders(params)).json()
    found = self.getvod(data['data']['result']['list'])
    return {'list': found, 'page': pg}
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
    """Resolve '<vid>@@<nid>' to a flat [name, url, name, url, ...] list."""
    self.header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
        'sec-ch-ua-platform': '"Windows"',
        'DNT': '1',
        'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
        'sec-ch-ua-mobile': '?0',
        'Origin': self.host,
        'Referer': f'{self.host}/'
    }
    ids = id.split('@@')
    pdata = self.fetch(
        f"{self.host}/api/mw-movie/anonymous/v2/video/episode/url?clientType=1&id={ids[0]}&nid={ids[1]}",
        headers=self.getheaders({'clientType': '1', 'id': ids[0], 'nid': ids[1]})).json()
    vlist = []
    for track in pdata['data']['list']:
        vlist.extend([track['resolutionName'], track['url']])
    return {'parse': 0, 'url': vlist, 'header': self.header}
|
||||
|
||||
def localProxy(self, param):
    """Local proxy hook (unused by this spider)."""
    pass
|
||||
|
||||
def host_late(self, url_list):
    """Return the lowest-latency host from a list (or comma-joined string).

    Every candidate is probed concurrently with a 1-second HEAD request;
    hosts that error out rank as infinitely slow. A single candidate is
    returned untested; an empty input yields ''.
    """
    candidates = [u.strip() for u in url_list.split(',')] if isinstance(url_list, str) else url_list
    if len(candidates) <= 1:
        return candidates[0] if candidates else ''

    latency = {}

    def probe(target):
        try:
            begin = time.time()
            requests.head(target, timeout=1.0, allow_redirects=False)
            latency[target] = (time.time() - begin) * 1000
        except Exception:
            latency[target] = float('inf')

    workers = [threading.Thread(target=probe, args=(u,)) for u in candidates]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    return min(latency.items(), key=lambda kv: kv[1])[0]
|
||||
|
||||
def md5(self, sign_key):
    """Hex MD5 digest of `sign_key` (inner step of request signing).

    Uses stdlib hashlib instead of pycryptodome's MD5 — the output is
    identical and it removes a third-party dependency from this path.
    """
    import hashlib
    return hashlib.md5(sign_key.encode('utf-8')).hexdigest()
|
||||
|
||||
def js(self, param):
    """Serialize a dict into an unescaped query string."""
    pairs = []
    for key, value in param.items():
        pairs.append(f"{key}={value}")
    return '&'.join(pairs)
|
||||
|
||||
def getheaders(self, param=None):
    """Build signed request headers for the API.

    NOTE: adds the signing fields 'key' and 't' to `param` in place
    before hashing — callers that reuse the dict see the mutation.
    Signature = SHA1(MD5(querystring)).
    """
    if param is None:param = {}
    t=str(int(time.time()*1000))
    param['key']='cb808529bae6b6be45ecfab29a4889bc'
    param['t']=t
    sha1_hash = SHA1.new()
    sha1_hash.update(self.md5(self.js(param)).encode('utf-8'))
    sign = sha1_hash.hexdigest()
    # fresh device id per request; server appears to accept any UUID
    deviceid = str(uuid.uuid4())
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.61 Chrome/126.0.6478.61 Not/A)Brand/8 Safari/537.36',
        'Accept': 'application/json, text/plain, */*',
        'sign': sign,
        't': t,
        'deviceid':deviceid
    }
    return headers
|
||||
|
||||
def convert_field_name(self, field):
    """Map API camelCase names (vodName, typeId) to snake_case
    (vod_name, type_id). Bare 'vod'/'type' stay unchanged."""
    name = field.lower()
    for prefix in ('vod', 'type'):
        if name.startswith(prefix) and len(name) > len(prefix):
            name = name.replace(prefix, f'{prefix}_')
    return name
|
||||
|
||||
def getvod(self, array):
    """Normalize every record's keys through convert_field_name."""
    normalized = []
    for record in array:
        normalized.append({self.convert_field_name(k): v for k, v in record.items()})
    return normalized
|
||||
|
132
PY/香蕉APP.py
Normal file
132
PY/香蕉APP.py
Normal file
@ -0,0 +1,132 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# by @嗷呜
|
||||
import random
|
||||
import string
|
||||
import sys
|
||||
sys.path.append('..')
|
||||
from base.spider import Spider
|
||||
|
||||
|
||||
class Spider(Spider):
|
||||
|
||||
def init(self, extend=""):
    # Resolve a random mirror host and the session auth headers up front.
    self.host,self.headers = self.getat()
    pass
|
||||
|
||||
# Framework hooks not used by this spider.
def getName(self):
    pass

def isVideoFormat(self, url):
    pass

def manualVideoCheck(self):
    pass

def destroy(self):
    pass
|
||||
|
||||
def homeContent(self, filter):
    """Home: the first listing page supplies categories, every filter
    dimension, and an initial video grid in one response."""
    data=self.fetch(f'{self.host}/vod/listing-0-0-0-0-0-0-0-0-0-0',headers=self.headers).json()
    result = {}
    classes = [{
        'type_name': '全部',
        'type_id': '0'
    }]
    filters = {}
    ft=[]
    # each filter key holds option dicts; the first two values per option
    # are treated as (id, label) — order inside the dict matters here
    filter_keys = ['orders', 'areas', 'years', 'definitions', 'durations', 'mosaics', 'langvoices']
    for key in filter_keys:
        if key in data['data']:
            filter_item = {
                'key': key,
                'name': key,
                'value': []
            }
            for item in data['data'][key]:
                first_two = dict(list(item.items())[:2])
                filter_item['value'].append({
                    'v': list(first_two.values())[0],
                    'n': list(first_two.values())[1]
                })
            ft.append(filter_item)
    filters['0']=ft
    for k in data['data']['categories']:
        classes.append({
            'type_name': k['catename'],
            'type_id': k['cateid']
        })
        # every category shares the same filter set
        filters[k['cateid']]=ft

    result['class'] = classes
    result['filters'] =filters
    result['list'] = self.getlist(data['data']['vodrows'])
    return result
|
||||
|
||||
def homeVideoContent(self):
    """Home vods already come from homeContent's listing call."""
    pass
|
||||
|
||||
def categoryContent(self, tid, pg, filter, extend):
    """Listing URL packs every filter into a fixed positional path."""
    path = (f'{self.host}/vod/listing-{tid}-{extend.get("areas","0")}-{extend.get("years","0")}'
            f'-1-{extend.get("definitions","0")}-{extend.get("durations","0")}'
            f'-{extend.get("mosaics","0")}-{extend.get("langvoices","0")}'
            f'-{extend.get("orders","0")}-{pg}')
    data = self.fetch(path, headers=self.headers).json()
    return {
        'list': self.getlist(data['data']['vodrows']),
        'page': pg,
        'pagecount': 9999,
        'limit': 90,
        'total': 999999,
    }
|
||||
|
||||
def detailContent(self, ids):
    """Request the play URLs; errmsg doubles as the source label."""
    data = self.fetch(f'{self.host}/vod/reqplay/{ids[0]}', headers=self.headers).json()
    episodes = [f"{row['hdtype']}${row['httpurl']}" for row in data['data']['httpurls']]
    vod = {
        'vod_play_from': data['errmsg'],
        'vod_play_url': '#'.join(episodes),
    }
    return {'list': [vod]}
|
||||
|
||||
def searchContent(self, key, quick, pg="1"):
    """Keyword search; rows go through the shared getlist mapper."""
    data = self.fetch(f'{self.host}/search?page={pg}&wd={key}', headers=self.headers).json()
    return {'list': self.getlist(data['data']['vodrows']), 'page': pg}
|
||||
|
||||
def playerContent(self, flag, id, vipFlags):
    """Direct URL playback with an ExoPlayer user agent."""
    play_headers = {'User-Agent': 'ExoPlayer'}
    return {'parse': 0, 'url': id, 'header': play_headers}
|
||||
|
||||
def localProxy(self, param):
    """Local proxy hook (unused by this spider)."""
    pass
|
||||
|
||||
def getlist(self, data):
    """Map raw vod rows to UI entries, skipping VIP-only items."""
    entries = []
    for row in data:
        if row['isvip'] == '1':
            continue
        entries.append({
            'vod_id': row['vodid'],
            'vod_name': row['title'],
            'vod_pic': row['coverpic'],
            'vod_year': row.get('duration'),
            'vod_remarks': row.get('catename'),
            'style': {"type": "rect", "ratio": 1.33}
        })
    return entries
|
||||
|
||||
def getat(self):
    """Pick a random mirror subdomain, call /init, and return
    (host, headers) with the session auth header attached."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 11; M2012K10C Build/RP1A.200720.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.141 Mobile Safari/537.36',
        'Accept': 'application/json, text/plain, */*',
        'x-auth-uuid': self.random_str(32),
        'x-system': 'Android',
        'x-version': '5.0.5',
        'x-channel': 'xj2',
        'x-requested-with': 'com.uyvzkv.pnjzdv',
        'sec-fetch-site': 'cross-site',
        'sec-fetch-mode': 'cors',
        'sec-fetch-dest': 'empty',
        'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
    }
    # random 6-char subdomain — presumably a wildcard DNS entry; verify
    host=f'https://{self.random_str(6)}.bjhpz.com'
    data=self.fetch(f'{host}/init',headers=headers).json()
    headers.update({'x-cookie-auth': data['data']['globalData'].get('xxx_api_auth')})
    return host,headers
|
||||
|
||||
def random_str(self, length=16):
    """Random lowercase-alphanumeric string of `length` characters."""
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choices(alphabet, k=length))
|
||||
|
Loading…
Reference in New Issue
Block a user