Add support for CNGOV (www.gov.cn)

2025-08-12 17:13:24 +08:00
parent 958b087f54
commit 130999364f
2 changed files with 46 additions and 1 deletion


@@ -90,6 +90,17 @@ def process_article(url, website):
        # Also add support for the new content structure
        if not content_tag:
            content_tag = soup.find("div", id="ContentText")
    elif website.name == "www.gov.cn":
        # Handling for the article structure of the Chinese government website
        title_tag = soup.find("h1") or soup.find("title")
        # Find the main content area, usually in .mainBody or content
        content_tag = (
            soup.find("div", class_="pages_content") or
            soup.find("div", class_="article_con") or
            soup.find("div", class_="content") or
            soup.find("div", id="content") or
            soup.find("div", class_="mainBody")
        )
    else:
        # Default handling
        title_tag = soup.find("h1") or soup.find("title")
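To see the fallback chain in action, here is a minimal standalone sketch; the sample HTML, the helper name extract_gov_cn, and the parser choice are illustrative assumptions, while the find() calls mirror the diff:

from bs4 import BeautifulSoup

SAMPLE_HTML = """
<html>
  <head><title>Sample policy document</title></head>
  <body><div class="pages_content"><p>Body text</p></div></body>
</html>
"""

def extract_gov_cn(html):
    # Hypothetical helper mirroring the fallback chain in the diff:
    # try each known container in order and keep the first match.
    soup = BeautifulSoup(html, "html.parser")
    title_tag = soup.find("h1") or soup.find("title")
    content_tag = (
        soup.find("div", class_="pages_content") or
        soup.find("div", class_="article_con") or
        soup.find("div", class_="content") or
        soup.find("div", id="content") or
        soup.find("div", class_="mainBody")
    )
    return title_tag, content_tag

title, content = extract_gov_cn(SAMPLE_HTML)
print(title.get_text(strip=True))    # Sample policy document
print(content.get_text(strip=True))  # Body text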
@@ -189,6 +200,20 @@ def full_site_crawler(start_url, website, max_pages=1000):
            soup.find("div", id="ContentText") is not None or
            ("/content/" in path and len(path) > 20)
        )
    elif website.name == "www.gov.cn":
        # Article-page detection logic for the Chinese government website
        parsed_url = urlparse(url)
        path = parsed_url.path
        is_article_page = (
            soup.find("div", class_="pages_content") is not None or
            soup.find("div", class_="article_con") is not None or
            soup.find("div", class_="content") is not None or
            soup.find("div", id="content") is not None or
            soup.find("div", class_="mainBody") is not None or
            ("/zhengce/" in path) or
            ("/xinwen/" in path) or
            ("/huoban/" in path)
        )
    else:
        # Default detection logic
        is_article_page = (
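The heuristic combines DOM probes with URL-path prefixes, so a page can be classified even when its markup varies. A standalone sketch for illustration; the function name looks_like_gov_cn_article is hypothetical, and only the markers from the diff are used:

from urllib.parse import urlparse
from bs4 import BeautifulSoup

def looks_like_gov_cn_article(url, html):
    # Hypothetical standalone version of the heuristic above: a page
    # counts as an article if any known content container is present,
    # or if its path sits under a known article section.
    soup = BeautifulSoup(html, "html.parser")
    path = urlparse(url).path
    has_content_div = any(
        probe is not None
        for probe in (
            soup.find("div", class_="pages_content"),
            soup.find("div", class_="article_con"),
            soup.find("div", class_="content"),
            soup.find("div", id="content"),
            soup.find("div", class_="mainBody"),
        )
    )
    in_article_section = any(
        seg in path for seg in ("/zhengce/", "/xinwen/", "/huoban/")
    )
    return has_content_div or in_article_section

With this, a URL under /zhengce/ would classify as an article from its path alone, before any DOM probe succeeds.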
@@ -205,4 +230,4 @@ def full_site_crawler(start_url, website, max_pages=1000):
    for link in soup.find_all("a", href=True):
        href = urljoin(url, link["href"])
        if href not in visited and is_valid_url(href, base_netloc):
            queue.append(href)
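Only the link-queueing tail of full_site_crawler appears in this hunk. For context, a minimal sketch of the breadth-first loop it belongs to, assuming a requests-based fetch and a hypothetical same-host is_valid_url (the real helper is not part of this commit):

from collections import deque
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup

def is_valid_url(href, base_netloc):
    # Hypothetical stand-in: accept only http(s) links on the same host.
    parsed = urlparse(href)
    return parsed.scheme in ("http", "https") and parsed.netloc == base_netloc

def crawl(start_url, max_pages=1000):
    # Breadth-first traversal: visited guards against revisits,
    # queue holds the crawl frontier.
    base_netloc = urlparse(start_url).netloc
    visited, queue = set(), deque([start_url])
    while queue and len(visited) < max_pages:
        url = queue.popleft()
        if url in visited:
            continue
        visited.add(url)
        resp = requests.get(url, timeout=10)
        soup = BeautifulSoup(resp.text, "html.parser")
        for link in soup.find_all("a", href=True):
            href = urljoin(url, link["href"])
            if href not in visited and is_valid_url(href, base_netloc):
                queue.append(href)
    return visited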