From 130999364fb7d07a325405852fe69893be47092b Mon Sep 17 00:00:00 2001
From: yuangyaa
Date: Tue, 12 Aug 2025 17:13:24 +0800
Subject: [PATCH] Add support for crawling www.gov.cn

---
 core/management/commands/crawl_cngov.py | 20 ++++++++++++++++++++
 core/utils.py                           | 27 ++++++++++++++++++++++++++-
 2 files changed, 46 insertions(+), 1 deletion(-)
 create mode 100644 core/management/commands/crawl_cngov.py

diff --git a/core/management/commands/crawl_cngov.py b/core/management/commands/crawl_cngov.py
new file mode 100644
index 0000000..4e1e4de
--- /dev/null
+++ b/core/management/commands/crawl_cngov.py
@@ -0,0 +1,20 @@
+from django.core.management.base import BaseCommand
+from core.models import Website
+from core.utils import full_site_crawler
+
+
+class Command(BaseCommand):
+    help = "Recursively crawl the entire www.gov.cn site"
+
+    def handle(self, *args, **kwargs):
+        website, created = Website.objects.get_or_create(
+            name="www.gov.cn",
+            defaults={
+                'article_list_url': 'https://www.gov.cn/',
+                'article_selector': 'a'
+            }
+        )
+        start_url = "https://www.gov.cn/"
+        self.stdout.write(f"Starting full-site crawl: {start_url}")
+        full_site_crawler(start_url, website, max_pages=500)
+        self.stdout.write("Crawl finished")
\ No newline at end of file
diff --git a/core/utils.py b/core/utils.py
index ce8236b..b6688fd 100644
--- a/core/utils.py
+++ b/core/utils.py
@@ -90,6 +90,17 @@ def process_article(url, website):
         # Also add support for the newer content structure
         if not content_tag:
             content_tag = soup.find("div", id="ContentText")
+    elif website.name == "www.gov.cn":
+        # Article structure handling for the Chinese government portal
+        title_tag = soup.find("h1") or soup.find("title")
+        # Look for the main content area, usually in .mainBody or .content
+        content_tag = (
+            soup.find("div", class_="pages_content") or
+            soup.find("div", class_="article_con") or
+            soup.find("div", class_="content") or
+            soup.find("div", id="content") or
+            soup.find("div", class_="mainBody")
+        )
     else:
         # Default handling
         title_tag = soup.find("h1") or soup.find("title")
@@ -189,6 +200,20 @@ def full_site_crawler(start_url, website, max_pages=1000):
                 soup.find("div", id="ContentText") is not None or
                 ("/content/" in path and len(path) > 20)
             )
+        elif website.name == "www.gov.cn":
+            # Article-page detection logic for www.gov.cn
+            parsed_url = urlparse(url)
+            path = parsed_url.path
+            is_article_page = (
+                soup.find("div", class_="pages_content") is not None or
+                soup.find("div", class_="article_con") is not None or
+                soup.find("div", class_="content") is not None or
+                soup.find("div", id="content") is not None or
+                soup.find("div", class_="mainBody") is not None or
+                ("/zhengce/" in path) or
+                ("/xinwen/" in path) or
+                ("/huoban/" in path)
+            )
         else:
             # Default detection logic
             is_article_page = (
@@ -205,4 +230,4 @@ def full_site_crawler(start_url, website, max_pages=1000):
         for link in soup.find_all("a", href=True):
             href = urljoin(url, link["href"])
             if href not in visited and is_valid_url(href, base_netloc):
-                queue.append(href)
+                queue.append(href)
\ No newline at end of file
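
Reviewer note (not part of the patch): once applied, the new command is
invoked as "python manage.py crawl_cngov". The core/utils.py hunks only show
the gov.cn-specific branches; the surrounding full_site_crawler is assumed to
be a breadth-first crawl roughly like the sketch below. Only the names visible
in the context lines (queue, visited, is_valid_url, base_netloc, urljoin,
urlparse) come from the patch; everything else is an assumption for
illustration, not the actual core/utils.py implementation.

    # Minimal sketch of the BFS loop the hunks above plug into -- an
    # assumption for review purposes, not the real core/utils.py code.
    from collections import deque
    from urllib.parse import urljoin, urlparse

    import requests
    from bs4 import BeautifulSoup


    def is_valid_url(href, base_netloc):
        # Assumed helper: keep only http(s) links on the same host.
        parsed = urlparse(href)
        return parsed.scheme in ("http", "https") and parsed.netloc == base_netloc


    def full_site_crawler(start_url, website, max_pages=1000):
        base_netloc = urlparse(start_url).netloc
        visited = set()
        queue = deque([start_url])
        while queue and len(visited) < max_pages:
            url = queue.popleft()
            if url in visited:
                continue
            visited.add(url)
            resp = requests.get(url, timeout=10)
            soup = BeautifulSoup(resp.text, "html.parser")
            # Per-site article detection goes here (the hunks above add the
            # www.gov.cn branch); matching pages would be handed to
            # process_article(url, website) before the walk continues.
            for link in soup.find_all("a", href=True):
                href = urljoin(url, link["href"])
                if href not in visited and is_valid_url(href, base_netloc):
                    queue.append(href)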