#!/usr/bin/env python
__all__ = ['pptv_download', 'pptv_download_by_id']
from ..common import *
import re
import time
import urllib.error
from random import random
def constructKey(arg):
    def str2hex(s):
        r=""
        for i in s[:8]:
            t=hex(ord(i))[2:]
            if len(t)==1:
                t="0"+t
            r+=t
        for i in range(16):
            r+=hex(int(15*random()))[2:]
        return r
    #ABANDONED Because SERVER_KEY is static
    def getkey(s):
        #returns 1896220160
        l2=[i for i in s]
        l4=0
        l3=0
        while l4<len(l2):
            # XOR-fold each character code into a 32-bit accumulator
            l3=l3^(ord(l2[l4])<<((l4%4)*8))
            l4+=1
        return l3

    def rot(k,b): ## ">>>" in as3 (unsigned right shift)
        if k>=0:
            return k>>b
        elif k<0:
            return (2**32+k)>>b
    def lot(k,b): ## "<<" in as3 (32-bit left shift)
        return (k<<b)%(2**32)
    # The rest of constructKey (a TEA-style encrypt() helper and its final
    # return value) was lost when this listing was pasted and is not shown here.

def pptv_download_by_id(id, output_dir = '.', merge = True, info_only = False):
    # The playlist URL and the XML tag names in the patterns below (<sh>, <key>,
    # <st>, <sgm>) were stripped by the HTML paste; they are restored here as a
    # best-effort reconstruction of the original extractor.
    xml = get_html('http://web-play.pptv.com/webplay3-0-%s.xml?type=web.fpp' % id)
    host = r1(r'<sh>([^<>]+)</sh>', xml)
    k = r1(r'<key expire=[^<>]+>([^<>]+)</key>', xml)
    rid = r1(r'rid="([^"]+)"', xml)
    title = r1(r'nm="([^"]+)"', xml)
    st=r1(r'<st>([^<>]+)</st>',xml)[:-4]  # server time string; last 4 chars dropped before parsing
    st=time.mktime(time.strptime(st))*1000-60*1000-time.time()*1000
    st+=time.time()*1000
    st=st/1000
    key=constructKey(st)
    pieces = re.findall(r'<sgm no="(\d+)"[^<>]+fs="(\d+)"', xml)
    numbers, fs = zip(*pieces)
    urls=["http://{}/{}/{}?key={}&fpp.ver=1.3.0.4&k={}&type=web.fpp".format(host,i,rid,key,k) for i in range(max(map(int,numbers))+1)]
    total_size = sum(map(int, fs))
    assert rid.endswith('.mp4')
    print_info(site_info, title, 'mp4', total_size)
    if not info_only:
        try:
            download_urls(urls, title, 'mp4', total_size, output_dir = output_dir, merge = merge)
        except urllib.error.HTTPError:
            #for key expired
            pptv_download_by_id(id, output_dir = output_dir, merge = merge, info_only = info_only)
def pptv_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
    assert re.match(r'http://v.pptv.com/show/(\w+)\.html$', url)
    html = get_html(url)
    id = r1(r'webcfg\s*=\s*{"id":\s*(\d+)', html)
    assert id
    pptv_download_by_id(id, output_dir = output_dir, merge = merge, info_only = info_only)
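In the you-get source tree an extractor module normally ends with a few module-level bindings that the framework looks up, and the listing above stops before them, leaving site_info (used by print_info) undefined. The lines below restore those bindings following you-get's extractor convention; treat the exact values and the sample URL as assumptions rather than a verbatim copy of the original file.

# Module-level bindings expected by the you-get framework (restored by convention).
site_info = "PPTV.com"
download = pptv_download
download_playlist = playlist_not_supported('pptv')

# Example of a direct call (hypothetical URL; info_only=True only prints title and size):
# pptv_download('http://v.pptv.com/show/abcdef.html', output_dir='.', merge=True, info_only=True)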