Add support for all platforms
@@ -9,7 +9,7 @@ class Command(BaseCommand):
     def add_arguments(self, parser):
         parser.add_argument('--media', type=str, help='Media outlets to crawl, comma-separated')
         parser.add_argument('--platform', type=str, default='all',
-                            help='Platform type: all (all platforms), web (website), mobile (mobile edition)')
+                            help='Platform type: all (all platforms), web (website), mobile (mobile edition)')
 
     def handle(self, *args, **options):
         media_list = options['media']
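The hunk above only shows the argument definitions. Below is a minimal, hypothetical sketch of how a handle() body could consume the two options; the parsing and the output message are assumptions, not code from this repository.

# Hypothetical sketch - not the repository's actual handle() implementation.
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    help = 'Crawl the selected media outlets'

    def add_arguments(self, parser):
        parser.add_argument('--media', type=str, help='Media outlets to crawl, comma-separated')
        parser.add_argument('--platform', type=str, default='all',
                            help='Platform type: all, web, mobile')

    def handle(self, *args, **options):
        media_list = options['media']
        platform = options['platform']
        # Assumed parsing: turn "a,b,c" into ['a', 'b', 'c']; no value means "all media".
        targets = [m.strip() for m in media_list.split(',')] if media_list else []
        self.stdout.write(f'Crawling {targets or "all media"} on platform {platform!r}')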
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
     def add_arguments(self, parser):
         parser.add_argument('--platform', type=str, default='all',
-                            choices=['cctv', 'cctvnews', 'mobile', 'all'],
-                            help='Platform to crawl: cctv (CCTV.com), cctvnews (CCTV News), mobile (mobile edition), all (all platforms)')
+                            choices=['cctv', 'cctvnews', 'mobile', 'all'],
+                            help='Platform to crawl: cctv (CCTV.com), cctvnews (CCTV News), mobile (mobile edition), all (all platforms)')
 
     def handle(self, *args, **options):
         platform = options['platform']
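Each per-site command below follows the same pattern: a --platform option restricted by choices, read back in handle(). A minimal dispatch sketch under assumed crawler names (crawl_cctv and friends are placeholders, not functions from this repository):

# Hypothetical dispatch sketch; the crawl_* functions are placeholders.
def crawl_cctv():
    print('crawling cctv.com')


def crawl_cctvnews():
    print('crawling CCTV News')


def crawl_mobile():
    print('crawling the mobile edition')


CRAWLERS = {
    'cctv': [crawl_cctv],
    'cctvnews': [crawl_cctvnews],
    'mobile': [crawl_mobile],
    'all': [crawl_cctv, crawl_cctvnews, crawl_mobile],
}


def run(platform='all'):
    # argparse's choices=[...] already rejects any value outside this mapping.
    for crawler in CRAWLERS[platform]:
        crawler()


run('all')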
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['china', 'province', 'all'],
-                            help='Platform to crawl: china (China.com.cn main site), province (China.com.cn provincial channels), all (all platforms)')
+                            choices=['china', 'province', 'all'],
+                            help='Platform to crawl: china (China.com.cn main site), province (China.com.cn provincial channels), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['chinadaily', 'mobile', 'all'],
-                            help='Platform to crawl: chinadaily (China Daily), mobile (mobile edition), all (all platforms)')
+                            choices=['chinadaily', 'mobile', 'all'],
+                            help='Platform to crawl: chinadaily (China Daily), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['chinanews', 'mobile', 'all'],
-                            help='Platform to crawl: chinanews (China News Service), mobile (mobile edition), all (all platforms)')
+                            choices=['chinanews', 'mobile', 'all'],
+                            help='Platform to crawl: chinanews (China News Service), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['fzrb', 'mobile', 'all'],
-                            help='Platform to crawl: fzrb (Legal Daily), mobile (mobile edition), all (all platforms)')
+                            choices=['fzrb', 'mobile', 'all'],
+                            help='Platform to crawl: fzrb (Legal Daily), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['gmrb', 'mobile', 'all'],
-                            help='Platform to crawl: gmrb (Guangming Daily), mobile (mobile edition), all (all platforms)')
+                            choices=['gmrb', 'mobile', 'all'],
+                            help='Platform to crawl: gmrb (Guangming Daily), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['grrb', 'mobile', 'all'],
-                            help='Platform to crawl: grrb (Workers\' Daily), mobile (mobile edition), all (all platforms)')
+                            choices=['grrb', 'mobile', 'all'],
+                            help='Platform to crawl: grrb (Workers\' Daily), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['jjrb', 'mobile', 'all'],
-                            help='Platform to crawl: jjrb (Economic Daily), mobile (mobile edition), all (all platforms)')
+                            choices=['jjrb', 'mobile', 'all'],
+                            help='Platform to crawl: jjrb (Economic Daily), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['nmrb', 'mobile', 'all'],
-                            help='Platform to crawl: nmrb (Farmers\' Daily), mobile (mobile edition), all (all platforms)')
+                            choices=['nmrb', 'mobile', 'all'],
+                            help='Platform to crawl: nmrb (Farmers\' Daily), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['pla', 'mobile', 'all'],
-                            help='Platform to crawl: pla (PLA Daily), mobile (mobile edition), all (all platforms)')
+                            choices=['pla', 'mobile', 'all'],
+                            help='Platform to crawl: pla (PLA Daily), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['qiushi', 'mobile', 'all'],
-                            help='Platform to crawl: qiushi (Qiushi), mobile (mobile edition), all (all platforms)')
+                            choices=['qiushi', 'mobile', 'all'],
+                            help='Platform to crawl: qiushi (Qiushi), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['qizhi', 'mobile', 'all'],
-                            help='Platform to crawl: qizhi (Qizhi), mobile (mobile edition), all (all platforms)')
+                            choices=['qizhi', 'mobile', 'all'],
+                            help='Platform to crawl: qizhi (Qizhi), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['peopleapp', 'people', 'paper', 'all'],
-                            help='Platform to crawl: peopleapp (People\'s Daily app), people (People\'s Daily Online), paper (print edition), all (all platforms)')
+                            choices=['peopleapp', 'people', 'paper', 'all'],
+                            help='Platform to crawl: peopleapp (People\'s Daily app), people (People\'s Daily Online), paper (print edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['news', 'xinhuanet', 'mobile', 'all'],
-                            help='Platform to crawl: news (Xinhuanet news), xinhuanet (Xinhuanet main site), mobile (mobile edition), all (all platforms)')
+                            choices=['news', 'xinhuanet', 'mobile', 'all'],
+                            help='Platform to crawl: news (Xinhuanet news), xinhuanet (Xinhuanet main site), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['xuexi', 'central', 'provincial', 'all'],
-                            help='Platform to crawl: xuexi (Xuexi Qiangguo main site), central (central media), provincial (provincial platforms), all (all platforms)')
+                            choices=['xuexi', 'central', 'provincial', 'all'],
+                            help='Platform to crawl: xuexi (Xuexi Qiangguo main site), central (central media), provincial (provincial platforms), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['xxsb', 'mobile', 'all'],
-                            help='Platform to crawl: xxsb (Study Times), mobile (mobile edition), all (all platforms)')
+                            choices=['xxsb', 'mobile', 'all'],
+                            help='Platform to crawl: xxsb (Study Times), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['zgfnb', 'mobile', 'all'],
-                            help='Platform to crawl: zgfnb (China Women\'s News), mobile (mobile edition), all (all platforms)')
+                            choices=['zgfnb', 'mobile', 'all'],
+                            help='Platform to crawl: zgfnb (China Women\'s News), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['zgjwjc', 'mobile', 'all'],
-                            help='Platform to crawl: zgjwjc (China Discipline Inspection and Supervision News), mobile (mobile edition), all (all platforms)')
+                            choices=['zgjwjc', 'mobile', 'all'],
+                            help='Platform to crawl: zgjwjc (China Discipline Inspection and Supervision News), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -8,8 +8,8 @@ class Command(BaseCommand):
 
    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
-                            choices=['zgqnb', 'mobile', 'all'],
-                            help='Platform to crawl: zgqnb (China Youth Daily), mobile (mobile edition), all (all platforms)')
+                            choices=['zgqnb', 'mobile', 'all'],
+                            help='Platform to crawl: zgqnb (China Youth Daily), mobile (mobile edition), all (all platforms)')
 
    def handle(self, *args, **options):
        platform = options['platform']
@@ -77,7 +77,8 @@
         <div class="meta">
             Website: {{ article.website.name }} |
             Published: {{ article.pub_date|date:"Y-m-d H:i" }} |
-            Created: {{ article.created_at|date:"Y-m-d H:i" }}
+            Created: {{ article.created_at|date:"Y-m-d H:i" }} |
+            Source URL: <a href="{{ article.url }}" target="_blank">{{ article.url }}</a>
         </div>
 
         <div class="content">
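The template reads article.website.name, pub_date, created_at and url. A minimal model sketch with those fields, purely as an assumption about the shape of the data (field types and model names are not taken from this repository):

# Assumed model shape only - not the repository's actual models.
from django.db import models


class Website(models.Model):
    name = models.CharField(max_length=200)


class Article(models.Model):
    website = models.ForeignKey(Website, on_delete=models.CASCADE)
    url = models.URLField(max_length=500)
    pub_date = models.DateTimeField(null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)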
@@ -126,7 +126,8 @@ def process_article(url, website):
             soup.find("div", class_="content") or
             soup.find("div", id="content") or
             soup.find("div", class_="article") or
-            soup.find("div", class_="main-content")
+            soup.find("div", class_="main-content") or
+            soup.find("span", id="detailContent")  # Xinhuanet-specific content container
         )
     elif website.name == "东方烟草报":
         # Improved title extraction for Dongfang Yancao Bao: try several selectors in priority order
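The selector chain works because BeautifulSoup's find() returns None on a miss, so `or` falls through to the next candidate. A self-contained illustration with made-up HTML:

from bs4 import BeautifulSoup

html = '<html><body><span id="detailContent"><p>Body text</p></span></body></html>'
soup = BeautifulSoup(html, 'html.parser')

content_tag = (
    soup.find('div', class_='content') or
    soup.find('div', id='content') or
    soup.find('div', class_='article') or
    soup.find('div', class_='main-content') or
    soup.find('span', id='detailContent')  # the Xinhuanet-style container matches here
)
print(content_tag.get_text(strip=True))  # -> Body text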
@@ -445,7 +446,8 @@ def process_article(url, website):
                 paging_element.decompose()
 
             # Remove the Zhengxiehao app download prompt
-            for zxh_element in content_tag.find_all("div", style=lambda x: x and "background:#F9F9F9;padding:50px" in x):
+            for zxh_element in content_tag.find_all("div",
+                                                    style=lambda x: x and "background:#F9F9F9;padding:50px" in x):
                 zxh_element.decompose()
 
             # Remove copyright notices
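find_all() accepts a callable as an attribute filter, which is how the download prompt is matched by its inline style before being removed with decompose(). A self-contained illustration with made-up HTML:

from bs4 import BeautifulSoup

html = (
    '<div class="article">'
    '<p>Keep me</p>'
    '<div style="background:#F9F9F9;padding:50px">Download the app!</div>'
    '</div>'
)
soup = BeautifulSoup(html, 'html.parser')
content_tag = soup.find('div', class_='article')

# The callable receives each tag's style attribute (or None).
for promo in content_tag.find_all('div', style=lambda x: x and 'background:#F9F9F9;padding:50px' in x):
    promo.decompose()  # remove the tag and everything inside it

print(content_tag)  # the download prompt is gone, the paragraph remains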
@@ -503,6 +505,7 @@ def process_article(url, website):
             soup.find("title")
         )
         content_tag = (
+            soup.find("div", id="detail") or  # Study Times-specific content container
             soup.find("div", class_="content") or
             soup.find("div", class_="article-content") or
             soup.find("div", id="content") or
@@ -511,6 +514,24 @@ def process_article(url, website):
             soup.find("div", class_="article") or
             soup.find("div", class_="article-body")
         )
+
+        # Study Times-specific cleanup: strip irrelevant elements from the content
+        if content_tag:
+            # Remove editor credits
+            for editor_element in content_tag.find_all("div", class_="editor"):
+                editor_element.decompose()
+
+            # Remove share widgets
+            for share_element in content_tag.find_all("div", class_="share"):
+                share_element.decompose()
+
+            # Remove the irrelevant TRS_Editor wrapper layer
+            for trs_editor in content_tag.find_all("div", class_="TRS_Editor"):
+                trs_editor.unwrap()  # unwrap removes only the tag and keeps its contents
+
+            # Remove the Custom_UnionStyle wrapper layer
+            for custom_style in content_tag.find_all("div", class_="Custom_UnionStyle"):
+                custom_style.unwrap()  # unwrap removes only the tag and keeps its contents
     elif "中国青年报" in website.name or "cyol" in website.name:
         # Article structure handling for China Youth Daily - fixes pages that could not be crawled
         title_tag = (
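The cleanup above relies on the difference between decompose() and unwrap(): the first deletes a tag together with its contents, the second removes only the wrapper and keeps what is inside. A self-contained illustration with made-up HTML:

from bs4 import BeautifulSoup

html = ('<div id="detail">'
        '<div class="TRS_Editor"><p>Article paragraph</p></div>'
        '<div class="editor">Editor: someone</div>'
        '</div>')
soup = BeautifulSoup(html, 'html.parser')
content = soup.find('div', id='detail')

for editor in content.find_all('div', class_='editor'):
    editor.decompose()   # the credit line disappears entirely

for wrapper in content.find_all('div', class_='TRS_Editor'):
    wrapper.unwrap()     # the wrapper goes away, the <p> stays

print(content)  # -> <div id="detail"><p>Article paragraph</p></div>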
@@ -532,6 +553,7 @@ def process_article(url, website):
         title_tag = (
             soup.find("h1", class_="title") or
             soup.find("h1") or
+            soup.find("p", class_="f_container_title") or  # China Women's News-specific title container
             soup.find("title")
         )
         content_tag = (
@@ -541,7 +563,9 @@ def process_article(url, website):
             soup.find("div", class_="text") or
             soup.find("div", class_="main-content") or
             soup.find("div", class_="article") or
-            soup.find("div", class_="article-body")
+            soup.find("div", class_="article-body") or
+            soup.find("div", class_="f_container_left") or  # China Women's News-specific content container
+            soup.find("div", class_="f_container")  # another possible content container
         )
     elif "法治日报" in website.name or "legaldaily" in website.name:
         # Article structure handling for Legal Daily - fixes pages that could not be crawled
@@ -714,6 +738,28 @@ def process_article(url, website):
             if not src:
                 src = video.get("data-url") or video.get("data-video")
 
+            # New: check the Xinhuanet-specific video source attribute
+            if not src:
+                src = video.get("data-video-src")
+
+            # New: Xinhuanet-specific handling - extract the video source from complex player markup
+            if not src and "新华网" in website.name:
+                # Try to read the src directly from the video tag's attributes
+                for attr in video.attrs:
+                    if 'src' in attr.lower():
+                        src = video.attrs.get(attr)
+                        break
+
+                # If still not found, look for video source info in the parent container
+                if not src:
+                    parent = video.parent
+                    if parent and parent.name == 'div' and 'player-container' in parent.get('class', []):
+                        # Check whether any data-* attribute carries video information
+                        for attr, value in parent.attrs.items():
+                            if 'data' in attr and isinstance(value, str) and ('.mp4' in value or 'video' in value):
+                                src = value
+                                break
+
             if not src:
                 continue
 
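The fallback logic above scans the video tag's attributes for anything src-like and then the parent player container's data-* attributes. A self-contained sketch of the same scan against made-up markup (not an actual Xinhuanet page):

from bs4 import BeautifulSoup

html = ('<div class="player-container" data-play-url="https://example.com/clip.mp4">'
        '<video data-video-src=""></video>'
        '</div>')
soup = BeautifulSoup(html, 'html.parser')
video = soup.find('video')

src = video.get('src') or video.get('data-url') or video.get('data-video') or video.get('data-video-src')

if not src:
    # Any attribute whose name contains "src" might carry the URL.
    for attr in video.attrs:
        if 'src' in attr.lower() and video.attrs.get(attr):
            src = video.attrs[attr]
            break

if not src:
    parent = video.parent
    if parent and parent.name == 'div' and 'player-container' in parent.get('class', []):
        for attr, value in parent.attrs.items():
            if 'data' in attr and isinstance(value, str) and ('.mp4' in value or 'video' in value):
                src = value
                break

print(src)  # -> https://example.com/clip.mp4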
@@ -726,6 +772,10 @@ def process_article(url, website):
             if "cctv.com" in src or "cntv.cn" in src:
                 print(f"Found CCTV video: {src}")
 
+            # Xinhuanet-specific handling
+            elif "新华网" in website.name:
+                print(f"Found Xinhuanet video: {src}")
+
             local_path = download_media(src, save_dir)
             if local_path:
                 rel_path = os.path.relpath(local_path, settings.MEDIA_ROOT)
@@ -812,6 +862,7 @@ def full_site_crawler(start_url, website, max_pages=1000):
             soup.find("div", id="content") is not None or
             soup.find("div", class_="article") is not None or
             soup.find("div", class_="main-content") is not None or
+            soup.find("span", id="detailContent") is not None or  # Xinhuanet-specific content container check
             ("/news/" in path) or
             ("/article/" in path) or
             (path.startswith("/detail/") and len(path) > 10)
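The condition above is part of an "is this an article page?" heuristic: either a known content container exists or the URL path looks article-like. A self-contained sketch of that predicate (the function name is mine, not the repository's):

from urllib.parse import urlparse
from bs4 import BeautifulSoup


def looks_like_article(html, url):
    soup = BeautifulSoup(html, 'html.parser')
    path = urlparse(url).path
    return (
        soup.find('div', id='content') is not None or
        soup.find('div', class_='article') is not None or
        soup.find('div', class_='main-content') is not None or
        soup.find('span', id='detailContent') is not None or
        '/news/' in path or
        '/article/' in path or
        (path.startswith('/detail/') and len(path) > 10)
    )


print(looks_like_article('<span id="detailContent">text</span>', 'https://example.com/page'))  # True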
@@ -1064,6 +1115,7 @@ def full_site_crawler(start_url, website, max_pages=1000):
             soup.find("div", class_="main-content") is not None or
             soup.find("div", class_="article") is not None or
             soup.find("div", class_="article-body") is not None or
+            soup.find("div", id="detail") is not None or  # Study Times-specific content container check
             ("/article/" in path) or
             ("/content/" in path) or
             (path.startswith("/detail/") and len(path) > 10)
@@ -1168,7 +1220,6 @@ def full_site_crawler(start_url, website, max_pages=1000):
            soup.find("div", class_="article-content") is not None or
            (soup.find("div", id="content") is not None and
             soup.find("h1") is not None) or
            soup.find("div", class_="text") is not None or
            soup.find("div", class_="main-content") is not None or
            soup.find("div", class_="article") is not None or
            soup.find("div", class_="article-body") is not None or
@@ -1220,7 +1271,7 @@ def full_site_crawler(start_url, website, max_pages=1000):
                 if ("/article/" in href_path or
                         href_path.startswith("/detail/") or
                         ("/dynamic/" in href_path and "article" in href_path) or
-                        href_path.count("/") > 2):  # deeper pages are likely article pages
+                        href_path.count("/") > 2):  # deeper paths
                     queue.append(href)
                 elif href not in visited and is_valid_url(href, base_netloc):
                     queue.append(href)
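The queue/visited pattern in the hunk above is a breadth-first crawl: article-like links are enqueued for extraction, other same-site links for further traversal. A minimal sketch of that loop under assumptions (is_valid_url below is a stand-in, not the repository's implementation):

from collections import deque
from urllib.parse import urlparse


def is_valid_url(href, base_netloc):
    parsed = urlparse(href)
    return parsed.scheme in ('http', 'https') and parsed.netloc == base_netloc


def crawl_order(start_links, base_netloc, max_pages=1000):
    queue = deque(start_links)
    visited = set()
    order = []
    while queue and len(order) < max_pages:
        href = queue.popleft()
        if href in visited or not is_valid_url(href, base_netloc):
            continue
        visited.add(href)
        order.append(href)
        # A real crawler would fetch href here, extract links, and extend the queue.
    return order


print(crawl_order(['https://example.com/article/1'], 'example.com'))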