Add Support for All Platforms

2025-08-14 23:42:16 +08:00
parent 4994310f14
commit ac98ac0057
23 changed files with 267 additions and 215 deletions


@@ -126,7 +126,8 @@ def process_article(url, website):
soup.find("div", class_="content") or
soup.find("div", id="content") or
soup.find("div", class_="article") or
soup.find("div", class_="main-content")
soup.find("div", class_="main-content") or
soup.find("span", id="detailContent") # 添加新华网特有的内容容器
)
elif website.name == "东方烟草报":
# Improve title extraction for 东方烟草报: try several selectors in priority order
@@ -177,45 +178,45 @@ def process_article(url, website):
soup.find("div", class_="rm_txt_con") or # 添加人民网特有的内容容器
soup.find("div", class_="text_c") # 添加新的内容容器
)
# Special handling for 人民网: strip irrelevant elements from the content
if content_tag:
# Remove editor information
for editor_element in content_tag.find_all("div", class_="edit"):
editor_element.decompose()
# Remove share-related elements
for share_element in content_tag.find_all("p", class_="paper_num"):
share_element.decompose()
# Remove irrelevant box_pic elements
for pic_element in content_tag.find_all("div", class_="box_pic"):
pic_element.decompose()
# Remove irrelevant zdfy elements
for zdfy_element in content_tag.find_all("div", class_="zdfy"):
zdfy_element.decompose()
# Remove irrelevant center elements
for center_element in content_tag.find_all("center"):
center_element.decompose()
# Remove irrelevant bza elements
for bza_element in content_tag.find_all("div", class_="bza"):
bza_element.decompose()
# Remove hidden, irrelevant elements
for hidden_element in content_tag.find_all(attrs={"style": "display: none;"}):
hidden_element.decompose()
# Remove "related topics" blocks
for related_element in content_tag.find_all("div", id="rwb_tjyd"):
related_element.decompose()
# Remove "recommended reading" blocks
for recommend_element in content_tag.find_all("div", class_="clearfix box_cai"):
recommend_element.decompose()
# Remove the related-topics list
for topic_element in content_tag.find_all("div", class_="clearfix text_like"):
topic_element.decompose()
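Note: the per-site cleanup above repeats the same find-and-decompose pattern. A minimal standalone sketch of that pattern (a hypothetical helper, not part of this commit):

from bs4 import BeautifulSoup

def strip_elements(content_tag, selectors):
    # Remove every element matching any of the given CSS selectors.
    for selector in selectors:
        for element in content_tag.select(selector):
            element.decompose()

html = '<div class="rm_txt_con"><p>Body text</p><div class="edit">Editor: X</div></div>'
content = BeautifulSoup(html, "html.parser").find("div", class_="rm_txt_con")
strip_elements(content, ["div.edit", "p.paper_num", "div.box_pic"])
print(content.get_text(strip=True))  # -> "Body text"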
@@ -414,7 +415,7 @@ def process_article(url, website):
title_text = first_p.find("strong").get_text().strip()
# Build a stand-in title_tag object
title_tag = first_p.find("strong")
content_tag = (
soup.find("div", class_="content") or
soup.find("div", class_="article-content") or
@@ -425,27 +426,28 @@ def process_article(url, website):
soup.find("div", class_="article-body") or
soup.find("div", class_="text_box") # 添加人民政协网特有内容容器
)
# Special handling for 人民政协网: strip irrelevant elements from the content
if content_tag:
# Remove editor information
for editor_element in content_tag.find_all("p", class_="Editor"):
editor_element.decompose()
# Remove share-related elements
for share_element in content_tag.find_all("div", class_="share"):
share_element.decompose()
# Remove Remark elements
for remark_element in content_tag.find_all("div", class_="Remark"):
remark_element.decompose()
# Remove Paging elements
for paging_element in content_tag.find_all("div", class_="Paging"):
paging_element.decompose()
# Remove the 政协号 client download prompt
for zxh_element in content_tag.find_all("div", style=lambda x: x and "background:#F9F9F9;padding:50px" in x):
for zxh_element in content_tag.find_all("div",
style=lambda x: x and "background:#F9F9F9;padding:50px" in x):
zxh_element.decompose()
# Remove copyright information
@@ -503,6 +505,7 @@ def process_article(url, website):
soup.find("title")
)
content_tag = (
soup.find("div", id="detail") or # 添加学习时报特有内容容器
soup.find("div", class_="content") or
soup.find("div", class_="article-content") or
soup.find("div", id="content") or
@@ -511,6 +514,24 @@ def process_article(url, website):
soup.find("div", class_="article") or
soup.find("div", class_="article-body")
)
# Special handling for 学习时报: strip irrelevant elements from the content
if content_tag:
# Remove editor information
for editor_element in content_tag.find_all("div", class_="editor"):
editor_element.decompose()
# Remove share-related elements
for share_element in content_tag.find_all("div", class_="share"):
share_element.decompose()
# Remove the irrelevant TRS_Editor wrapper layer
for trs_editor in content_tag.find_all("div", class_="TRS_Editor"):
trs_editor.unwrap()  # unwrap removes only the tag and keeps its contents
# Remove the Custom_UnionStyle wrapper layer
for custom_style in content_tag.find_all("div", class_="Custom_UnionStyle"):
custom_style.unwrap()  # unwrap removes only the tag and keeps its contents
elif "中国青年报" in website.name or "cyol" in website.name:
# Article-structure handling for 中国青年报 - fixes pages that could not be crawled
title_tag = (
@@ -532,6 +553,7 @@ def process_article(url, website):
title_tag = (
soup.find("h1", class_="title") or
soup.find("h1") or
soup.find("p", class_="f_container_title") or # 添加中国妇女报特有标题容器
soup.find("title")
)
content_tag = (
@@ -541,7 +563,9 @@ def process_article(url, website):
soup.find("div", class_="text") or
soup.find("div", class_="main-content") or
soup.find("div", class_="article") or
soup.find("div", class_="article-body")
soup.find("div", class_="article-body") or
soup.find("div", class_="f_container_left") or # 添加中国妇女报特有内容容器
soup.find("div", class_="f_container") # 添加另一种可能的内容容器
)
elif "法治日报" in website.name or "legaldaily" in website.name:
# Article-structure handling for 法治日报 - fixes pages that could not be crawled
@@ -604,7 +628,7 @@ def process_article(url, website):
elif "旗帜网" in website.name or "qizhiwang" in website.name:
# Article-structure handling for 旗帜网 - fixes article content not being saved
title_tag = (
soup.find("div", class_="w1200 flag-text-tit clearfix") and
soup.find("div", class_="w1200 flag-text-tit clearfix") and
soup.find("div", class_="w1200 flag-text-tit clearfix").find("h1") or
soup.find("h1", class_="title") or
soup.find("h1") or
@@ -620,29 +644,29 @@ def process_article(url, website):
soup.find("div", class_="article") or
soup.find("div", class_="article-body")
)
# Special handling for 旗帜网: strip irrelevant elements from the content
if content_tag:
# Remove editor information
for editor_element in content_tag.find_all("p", class_="editor"):
editor_element.decompose()
# Remove share-related elements
for share_element in content_tag.find_all("div", class_="share-demo"):
share_element.decompose()
# Remove text-scaling controls
for scale_element in content_tag.find_all("div", class_="scale-main"):
scale_element.decompose()
# Remove irrelevant div.pic elements
for pic_element in content_tag.find_all("div", class_="pic"):
pic_element.decompose()
# Remove irrelevant zdfy elements
for zdfy_element in content_tag.find_all("div", class_="zdfy"):
zdfy_element.decompose()
# Remove irrelevant center elements
for center_element in content_tag.find_all("center"):
center_element.decompose()
@@ -714,6 +738,28 @@ def process_article(url, website):
if not src:
src = video.get("data-url") or video.get("data-video")
# New: check the video-source attribute specific to 新华网
if not src:
src = video.get("data-video-src")
# New: for 新华网, extract the video source from its more complex player structure
if not src and "新华网" in website.name:
# Try to read src directly from any attribute of the video tag
for attr in video.attrs:
if 'src' in attr.lower():
src = video.attrs.get(attr)
break
# If still not found, look for video-source info on the parent container
if not src:
parent = video.parent
if parent and parent.name == 'div' and 'player-container' in parent.get('class', []):
# Check whether any data-* attribute carries video information
for attr, value in parent.attrs.items():
if 'data' in attr and isinstance(value, str) and ('.mp4' in value or 'video' in value):
src = value
break
if not src:
continue
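Note: spread across the hunk above, the source lookup falls back through several attributes; consolidated as a standalone sketch (the helper name and exact attribute order are assumptions, not the committed code):

def find_video_src(video):
    # Direct <video> attributes, in rough priority order.
    for attr in ("src", "data-url", "data-video", "data-video-src"):
        if video.get(attr):
            return video[attr]
    # Any other attribute whose name contains "src".
    for attr, value in video.attrs.items():
        if "src" in attr.lower() and value:
            return value
    # Finally, data-* attributes on a wrapping player container.
    parent = video.parent
    if parent is not None and parent.name == "div" and "player-container" in parent.get("class", []):
        for attr, value in parent.attrs.items():
            if attr.startswith("data-") and isinstance(value, str) and (".mp4" in value or "video" in value):
                return value
    return None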
@@ -726,6 +772,10 @@ def process_article(url, website):
if "cctv.com" in src or "cntv.cn" in src:
print(f"发现央视视频: {src}")
# 针对新华网的特殊处理
elif "新华网" in website.name:
print(f"发现新华网视频: {src}")
local_path = download_media(src, save_dir)
if local_path:
rel_path = os.path.relpath(local_path, settings.MEDIA_ROOT)
@@ -812,6 +862,7 @@ def full_site_crawler(start_url, website, max_pages=1000):
soup.find("div", id="content") is not None or
soup.find("div", class_="article") is not None or
soup.find("div", class_="main-content") is not None or
soup.find("span", id="detailContent") is not None or # 添加新华网特有内容容器判断
("/news/" in path) or
("/article/" in path) or
(path.startswith("/detail/") and len(path) > 10)
@@ -1064,6 +1115,7 @@ def full_site_crawler(start_url, website, max_pages=1000):
soup.find("div", class_="main-content") is not None or
soup.find("div", class_="article") is not None or
soup.find("div", class_="article-body") is not None or
soup.find("div", id="detail") is not None or # 添加学习时报特有内容容器判断
("/article/" in path) or
("/content/" in path) or
(path.startswith("/detail/") and len(path) > 10)
@@ -1168,7 +1220,6 @@ def full_site_crawler(start_url, website, max_pages=1000):
soup.find("div", class_="article-content") is not None or
(soup.find("div", id="content") is not None and
soup.find("h1") is not None) or
soup.find("div", class_="text") is not None or
soup.find("div", class_="main-content") is not None or
soup.find("div", class_="article") is not None or
soup.find("div", class_="article-body") is not None or
@@ -1220,7 +1271,7 @@ def full_site_crawler(start_url, website, max_pages=1000):
if ("/article/" in href_path or
href_path.startswith("/detail/") or
("/dynamic/" in href_path and "article" in href_path) or
href_path.count("/") > 2):  # deeper pages are likely article pages
href_path.count("/") > 2):  # deeper pages
queue.append(href)
elif href not in visited and is_valid_url(href, base_netloc):
queue.append(href)
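Note: is_valid_url(href, base_netloc) is referenced above but defined outside this diff; a hypothetical sketch of the kind of check it likely performs (same host, http(s) only, no obvious static assets):

from urllib.parse import urlparse

def is_valid_url(href, base_netloc):
    # Hypothetical: the real implementation lives outside this diff.
    parsed = urlparse(href)
    if parsed.scheme not in ("http", "https"):
        return False
    if parsed.netloc != base_netloc:
        return False
    return not parsed.path.lower().endswith((".jpg", ".png", ".gif", ".css", ".js", ".pdf"))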