Add support for dongfangyaocao

This commit is contained in:
2025-08-11 22:20:19 +08:00
parent 6d80326a4e
commit d9d2ea9d99
11 changed files with 686 additions and 58 deletions

View File

@@ -1,18 +1,21 @@
# core/management/commands/crawl_xinhua.py
from django.core.management.base import BaseCommand
from core.models import Website
from core.utils import crawl_xinhua_list
from core.utils import full_site_crawler
class Command(BaseCommand):
    """Management command: recursively crawl the whole www.news.cn site.

    Replaces the earlier single-list-page crawl (``crawl_xinhua_list``)
    with a bounded full-site traversal via ``full_site_crawler``.
    """

    help = "全站递归爬取 www.news.cn"

    def handle(self, *args, **kwargs):
        # Ensure the Website row exists: create it with defaults on first
        # run instead of aborting when it is missing (the old version
        # required the row to be added in the admin beforehand).
        website, created = Website.objects.get_or_create(
            name="www.news.cn",
            defaults={
                'article_list_url': 'https://www.news.cn/',
                'article_selector': 'a',
            },
        )
        start_url = "https://www.news.cn/"
        self.stdout.write(f"开始全站爬取: {start_url}")
        # max_pages caps the traversal so a full-site crawl cannot run
        # unbounded; tune as needed.
        full_site_crawler(start_url, website, max_pages=500)
        self.stdout.write("爬取完成")