Add support for CNGOV (www.gov.cn)
core/management/commands/crawl_cngov.py (new file, 20 lines)
@@ -0,0 +1,20 @@
from django.core.management.base import BaseCommand
from core.models import Website
from core.utils import full_site_crawler


class Command(BaseCommand):
    help = "Recursively crawl the entire www.gov.cn site"

    def handle(self, *args, **kwargs):
        website, created = Website.objects.get_or_create(
            name="www.gov.cn",
            defaults={
                'article_list_url': 'https://www.gov.cn/',
                'article_selector': 'a'
            }
        )
        start_url = "https://www.gov.cn/"
        self.stdout.write(f"Starting full-site crawl: {start_url}")
        full_site_crawler(start_url, website, max_pages=500)
        self.stdout.write("Crawl finished")
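For reference, Django derives a management command's name from its module filename, so this one runs as "crawl_cngov"; a minimal usage sketch for triggering it from code (the calling context is an assumption, not part of this commit):

from django.core.management import call_command

# Equivalent to running "python manage.py crawl_cngov" from the shell.
call_command("crawl_cngov")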
core/utils.py
@@ -90,6 +90,17 @@ def process_article(url, website):
        # Also add support for the newer content structure
        if not content_tag:
            content_tag = soup.find("div", id="ContentText")
    elif website.name == "www.gov.cn":
        # Article structure handling for the Chinese government website
        title_tag = soup.find("h1") or soup.find("title")
        # Look for the main content area, usually in .mainBody or a content container
        content_tag = (
            soup.find("div", class_="pages_content") or
            soup.find("div", class_="article_con") or
            soup.find("div", class_="content") or
            soup.find("div", id="content") or
            soup.find("div", class_="mainBody")
        )
    else:
        # Default handling
        title_tag = soup.find("h1") or soup.find("title")
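As a quick illustration, the or-chain above keeps the first selector that actually matches; a standalone sketch with invented sample HTML, assuming BeautifulSoup as the parser (which the soup.find calls suggest):

from bs4 import BeautifulSoup

# Invented sample document; only the first matching container is used.
html = '<div class="pages_content"><p>Policy text</p></div>'
soup = BeautifulSoup(html, "html.parser")
content_tag = (
    soup.find("div", class_="pages_content") or
    soup.find("div", class_="article_con") or
    soup.find("div", class_="content")
)
print(content_tag.get_text(strip=True))  # -> Policy text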
@@ -189,6 +200,20 @@ def full_site_crawler(start_url, website, max_pages=1000):
                soup.find("div", id="ContentText") is not None or
                ("/content/" in path and len(path) > 20)
            )
        elif website.name == "www.gov.cn":
            # Article-page detection logic for the Chinese government website
            parsed_url = urlparse(url)
            path = parsed_url.path
            is_article_page = (
                soup.find("div", class_="pages_content") is not None or
                soup.find("div", class_="article_con") is not None or
                soup.find("div", class_="content") is not None or
                soup.find("div", id="content") is not None or
                soup.find("div", class_="mainBody") is not None or
                ("/zhengce/" in path) or
                ("/xinwen/" in path) or
                ("/huoban/" in path)
            )
        else:
            # Default detection logic
            is_article_page = (