Add support for the other websites
This commit is contained in:
77
core/management/commands/crawl_all_media.py
Normal file
77
core/management/commands/crawl_all_media.py
Normal file
@@ -0,0 +1,77 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from django.core.management import call_command
|
||||
from core.models import Website
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Batch-crawl all configured central state media outlets.

    Dispatches to the per-outlet management commands (``crawl_rmrb`` etc.)
    via ``call_command`` and prints aggregate statistics afterwards.
    """

    help = "批量爬取所有中央主流媒体"

    def add_arguments(self, parser):
        parser.add_argument('--media', type=str, help='指定要爬取的媒体,用逗号分隔')
        parser.add_argument('--platform', type=str, default='all',
                            help='指定平台类型: all(全部), web(网站), mobile(移动端)')

    def handle(self, *args, **options):
        media_list = options['media']
        platform = options['platform']

        # Map of media key -> management command that crawls that outlet.
        all_media = {
            'rmrb': 'crawl_rmrb',
            'xinhua': 'crawl_xinhua',
            'cctv': 'crawl_cctv',
            'qiushi': 'crawl_qiushi',
            'pla': 'crawl_pla',
            'gmrb': 'crawl_gmrb',
            'jjrb': 'crawl_jjrb',
            'chinadaily': 'crawl_chinadaily',
            'grrb': 'crawl_grrb',
            'kjrb': 'crawl_kjrb',
            'rmzxb': 'crawl_rmzxb',
            'zgjwjc': 'crawl_zgjwjc',
            'chinanews': 'crawl_chinanews',
            'xxsb': 'crawl_xxsb',
            'zgqnb': 'crawl_zgqnb',
            'zgfnb': 'crawl_zgfnb',
            'fzrb': 'crawl_fzrb',
            'nmrb': 'crawl_nmrb',
            'xuexi': 'crawl_xuexi',
            'qizhi': 'crawl_qizhi',
            'china': 'crawl_china',
        }

        # If specific media were requested, restrict to those; otherwise crawl all.
        if media_list:
            target_media = [media.strip() for media in media_list.split(',')]
        else:
            target_media = list(all_media.keys())

        self.stdout.write(f"开始批量爬取 {len(target_media)} 家中央主流媒体...")

        for media in target_media:
            command_name = all_media.get(media)
            if command_name is None:
                self.stdout.write(self.style.WARNING(f"未知媒体: {media}"))
                continue
            try:
                self.stdout.write(f"正在爬取: {media}")
                call_command(command_name, platform=platform)
                self.stdout.write(self.style.SUCCESS(f"完成爬取: {media}"))
            except Exception as e:
                # Best-effort: one failing outlet must not abort the batch.
                self.stdout.write(self.style.ERROR(f"爬取 {media} 失败: {e}"))

        self.stdout.write(self.style.SUCCESS("所有中央主流媒体爬取完成"))

        # Statistics. Annotate article counts in a single query instead of
        # issuing one COUNT per website twice over (fixes the original
        # N+1 query pattern of `website.article_set.count()` in loops).
        from django.db.models import Count

        websites = list(Website.objects.annotate(num_articles=Count('article')))
        total_websites = len(websites)
        total_articles = sum(w.num_articles for w in websites)

        self.stdout.write("统计信息:")
        self.stdout.write(f"- 总网站数: {total_websites}")
        self.stdout.write(f"- 总文章数: {total_articles}")

        # Per-outlet article counts, reusing the annotated queryset.
        self.stdout.write("各媒体文章数量:")
        for website in websites:
            self.stdout.write(f"- {website.name}: {website.num_articles} 篇")
|
||||
65
core/management/commands/crawl_cctv.py
Normal file
65
core/management/commands/crawl_cctv.py
Normal file
@@ -0,0 +1,65 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl every China Media Group (CCTV) platform.

    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 中央广播电视总台及其子网站、客户端、新媒体平台"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['cctv', 'cctvnews', 'mobile', 'all'],
                            help='选择爬取平台: cctv(央视网), cctvnews(央视新闻), mobile(移动端), all(全部)')

    def handle(self, *args, **options):
        # Per-platform crawl configuration: display name, site root,
        # crawl entry URL, and the CSS selector for article links.
        configs = {
            'cctv': {
                'name': '央视网',
                'base_url': 'https://www.cctv.com',
                'start_url': 'https://www.cctv.com',
                'article_selector': 'a',
            },
            'cctvnews': {
                'name': '央视新闻',
                'base_url': 'https://news.cctv.com',
                'start_url': 'https://news.cctv.com',
                'article_selector': 'a',
            },
            'mobile': {
                'name': '央视移动端',
                'base_url': 'https://m.cctv.com',
                'start_url': 'https://m.cctv.com',
                'article_selector': 'a',
            },
        }

        choice = options['platform']
        selected = list(configs.values()) if choice == 'all' else [configs[choice]]

        for cfg in selected:
            site, was_created = Website.objects.get_or_create(
                name=cfg['name'],
                defaults={
                    'base_url': cfg['base_url'],
                    'article_list_url': cfg['start_url'],
                    'article_selector': cfg['article_selector'],
                },
            )
            if not was_created:
                # Keep a pre-existing Website row in sync with this config.
                site.base_url = cfg['base_url']
                site.article_list_url = cfg['start_url']
                site.article_selector = cfg['article_selector']
                site.save()

            self.stdout.write(f"开始爬取: {cfg['name']} - {cfg['start_url']}")
            full_site_crawler(cfg['start_url'], site, max_pages=500)
            self.stdout.write(f"完成爬取: {cfg['name']}")

        self.stdout.write(self.style.SUCCESS("中央广播电视总台所有平台爬取完成"))
|
||||
59
core/management/commands/crawl_china.py
Normal file
59
core/management/commands/crawl_china.py
Normal file
@@ -0,0 +1,59 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl china.com.cn (main site and provinces channel).

    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 中国网主网及中国网一省份,不转发二级子网站"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['china', 'province', 'all'],
                            help='选择爬取平台: china(中国网主网), province(中国网一省份), all(全部)')

    def handle(self, *args, **options):
        # Per-platform crawl configuration: display name, site root,
        # crawl entry URL, and the CSS selector for article links.
        configs = {
            'china': {
                'name': '中国网',
                'base_url': 'http://www.china.com.cn',
                'start_url': 'http://www.china.com.cn',
                'article_selector': 'a',
            },
            'province': {
                'name': '中国网一省份',
                'base_url': 'http://www.china.com.cn',
                'start_url': 'http://www.china.com.cn/province',
                'article_selector': 'a',
            },
        }

        choice = options['platform']
        selected = list(configs.values()) if choice == 'all' else [configs[choice]]

        for cfg in selected:
            site, was_created = Website.objects.get_or_create(
                name=cfg['name'],
                defaults={
                    'base_url': cfg['base_url'],
                    'article_list_url': cfg['start_url'],
                    'article_selector': cfg['article_selector'],
                },
            )
            if not was_created:
                # Keep a pre-existing Website row in sync with this config.
                site.base_url = cfg['base_url']
                site.article_list_url = cfg['start_url']
                site.article_selector = cfg['article_selector']
                site.save()

            self.stdout.write(f"开始爬取: {cfg['name']} - {cfg['start_url']}")
            full_site_crawler(cfg['start_url'], site, max_pages=500)
            self.stdout.write(f"完成爬取: {cfg['name']}")

        self.stdout.write(self.style.SUCCESS("中国网所有平台爬取完成"))
|
||||
59
core/management/commands/crawl_chinadaily.py
Normal file
59
core/management/commands/crawl_chinadaily.py
Normal file
@@ -0,0 +1,59 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl China Daily (desktop and mobile sites).

    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 中国日报及其子网站、客户端、新媒体平台"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['chinadaily', 'mobile', 'all'],
                            help='选择爬取平台: chinadaily(中国日报), mobile(移动端), all(全部)')

    def handle(self, *args, **options):
        # Per-platform crawl configuration: display name, site root,
        # crawl entry URL, and the CSS selector for article links.
        configs = {
            'chinadaily': {
                'name': '中国日报',
                'base_url': 'https://www.chinadaily.com.cn',
                'start_url': 'https://www.chinadaily.com.cn',
                'article_selector': 'a',
            },
            'mobile': {
                'name': '中国日报移动端',
                'base_url': 'https://m.chinadaily.com.cn',
                'start_url': 'https://m.chinadaily.com.cn',
                'article_selector': 'a',
            },
        }

        choice = options['platform']
        selected = list(configs.values()) if choice == 'all' else [configs[choice]]

        for cfg in selected:
            site, was_created = Website.objects.get_or_create(
                name=cfg['name'],
                defaults={
                    'base_url': cfg['base_url'],
                    'article_list_url': cfg['start_url'],
                    'article_selector': cfg['article_selector'],
                },
            )
            if not was_created:
                # Keep a pre-existing Website row in sync with this config.
                site.base_url = cfg['base_url']
                site.article_list_url = cfg['start_url']
                site.article_selector = cfg['article_selector']
                site.save()

            self.stdout.write(f"开始爬取: {cfg['name']} - {cfg['start_url']}")
            full_site_crawler(cfg['start_url'], site, max_pages=500)
            self.stdout.write(f"完成爬取: {cfg['name']}")

        self.stdout.write(self.style.SUCCESS("中国日报所有平台爬取完成"))
|
||||
59
core/management/commands/crawl_chinanews.py
Normal file
59
core/management/commands/crawl_chinanews.py
Normal file
@@ -0,0 +1,59 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl China News Service (desktop and mobile sites).

    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 中国新闻社及其子网站、客户端、新媒体平台"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['chinanews', 'mobile', 'all'],
                            help='选择爬取平台: chinanews(中国新闻社), mobile(移动端), all(全部)')

    def handle(self, *args, **options):
        # Per-platform crawl configuration: display name, site root,
        # crawl entry URL, and the CSS selector for article links.
        configs = {
            'chinanews': {
                'name': '中国新闻社',
                'base_url': 'https://www.chinanews.com.cn',
                'start_url': 'https://www.chinanews.com.cn',
                'article_selector': 'a',
            },
            'mobile': {
                'name': '中国新闻社移动端',
                'base_url': 'https://m.chinanews.com.cn',
                'start_url': 'https://m.chinanews.com.cn',
                'article_selector': 'a',
            },
        }

        choice = options['platform']
        selected = list(configs.values()) if choice == 'all' else [configs[choice]]

        for cfg in selected:
            site, was_created = Website.objects.get_or_create(
                name=cfg['name'],
                defaults={
                    'base_url': cfg['base_url'],
                    'article_list_url': cfg['start_url'],
                    'article_selector': cfg['article_selector'],
                },
            )
            if not was_created:
                # Keep a pre-existing Website row in sync with this config.
                site.base_url = cfg['base_url']
                site.article_list_url = cfg['start_url']
                site.article_selector = cfg['article_selector']
                site.save()

            self.stdout.write(f"开始爬取: {cfg['name']} - {cfg['start_url']}")
            full_site_crawler(cfg['start_url'], site, max_pages=500)
            self.stdout.write(f"完成爬取: {cfg['name']}")

        self.stdout.write(self.style.SUCCESS("中国新闻社所有平台爬取完成"))
|
||||
59
core/management/commands/crawl_fzrb.py
Normal file
59
core/management/commands/crawl_fzrb.py
Normal file
@@ -0,0 +1,59 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl Legal Daily (desktop and mobile sites).

    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 法治日报及其子网站、客户端、新媒体平台"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['fzrb', 'mobile', 'all'],
                            help='选择爬取平台: fzrb(法治日报), mobile(移动端), all(全部)')

    def handle(self, *args, **options):
        # Per-platform crawl configuration: display name, site root,
        # crawl entry URL, and the CSS selector for article links.
        configs = {
            'fzrb': {
                'name': '法治日报',
                'base_url': 'http://www.legaldaily.com.cn',
                'start_url': 'http://www.legaldaily.com.cn',
                'article_selector': 'a',
            },
            'mobile': {
                'name': '法治日报移动端',
                'base_url': 'http://m.legaldaily.com.cn',
                'start_url': 'http://m.legaldaily.com.cn',
                'article_selector': 'a',
            },
        }

        choice = options['platform']
        selected = list(configs.values()) if choice == 'all' else [configs[choice]]

        for cfg in selected:
            site, was_created = Website.objects.get_or_create(
                name=cfg['name'],
                defaults={
                    'base_url': cfg['base_url'],
                    'article_list_url': cfg['start_url'],
                    'article_selector': cfg['article_selector'],
                },
            )
            if not was_created:
                # Keep a pre-existing Website row in sync with this config.
                site.base_url = cfg['base_url']
                site.article_list_url = cfg['start_url']
                site.article_selector = cfg['article_selector']
                site.save()

            self.stdout.write(f"开始爬取: {cfg['name']} - {cfg['start_url']}")
            full_site_crawler(cfg['start_url'], site, max_pages=500)
            self.stdout.write(f"完成爬取: {cfg['name']}")

        self.stdout.write(self.style.SUCCESS("法治日报所有平台爬取完成"))
|
||||
59
core/management/commands/crawl_gmrb.py
Normal file
59
core/management/commands/crawl_gmrb.py
Normal file
@@ -0,0 +1,59 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl Guangming Daily (desktop and mobile sites).

    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 光明日报及其子网站、客户端、新媒体平台"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['gmrb', 'mobile', 'all'],
                            help='选择爬取平台: gmrb(光明日报), mobile(移动端), all(全部)')

    def handle(self, *args, **options):
        # Per-platform crawl configuration: display name, site root,
        # crawl entry URL, and the CSS selector for article links.
        configs = {
            'gmrb': {
                'name': '光明日报',
                'base_url': 'https://www.gmw.cn',
                'start_url': 'https://www.gmw.cn',
                'article_selector': 'a',
            },
            'mobile': {
                'name': '光明日报移动端',
                'base_url': 'https://m.gmw.cn',
                'start_url': 'https://m.gmw.cn',
                'article_selector': 'a',
            },
        }

        choice = options['platform']
        selected = list(configs.values()) if choice == 'all' else [configs[choice]]

        for cfg in selected:
            site, was_created = Website.objects.get_or_create(
                name=cfg['name'],
                defaults={
                    'base_url': cfg['base_url'],
                    'article_list_url': cfg['start_url'],
                    'article_selector': cfg['article_selector'],
                },
            )
            if not was_created:
                # Keep a pre-existing Website row in sync with this config.
                site.base_url = cfg['base_url']
                site.article_list_url = cfg['start_url']
                site.article_selector = cfg['article_selector']
                site.save()

            self.stdout.write(f"开始爬取: {cfg['name']} - {cfg['start_url']}")
            full_site_crawler(cfg['start_url'], site, max_pages=500)
            self.stdout.write(f"完成爬取: {cfg['name']}")

        self.stdout.write(self.style.SUCCESS("光明日报所有平台爬取完成"))
|
||||
59
core/management/commands/crawl_grrb.py
Normal file
59
core/management/commands/crawl_grrb.py
Normal file
@@ -0,0 +1,59 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl Workers' Daily (desktop and mobile sites).

    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 工人日报及其子网站、客户端、新媒体平台"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['grrb', 'mobile', 'all'],
                            help='选择爬取平台: grrb(工人日报), mobile(移动端), all(全部)')

    def handle(self, *args, **options):
        # Per-platform crawl configuration: display name, site root,
        # crawl entry URL, and the CSS selector for article links.
        configs = {
            'grrb': {
                'name': '工人日报',
                'base_url': 'http://www.workercn.cn',
                'start_url': 'http://www.workercn.cn',
                'article_selector': 'a',
            },
            'mobile': {
                'name': '工人日报移动端',
                'base_url': 'http://m.workercn.cn',  # mobile-site URL
                'start_url': 'http://m.workercn.cn',
                'article_selector': 'a',
            },
        }

        choice = options['platform']
        selected = list(configs.values()) if choice == 'all' else [configs[choice]]

        for cfg in selected:
            site, was_created = Website.objects.get_or_create(
                name=cfg['name'],
                defaults={
                    'base_url': cfg['base_url'],
                    'article_list_url': cfg['start_url'],
                    'article_selector': cfg['article_selector'],
                },
            )
            if not was_created:
                # Keep a pre-existing Website row in sync with this config.
                site.base_url = cfg['base_url']
                site.article_list_url = cfg['start_url']
                site.article_selector = cfg['article_selector']
                site.save()

            self.stdout.write(f"开始爬取: {cfg['name']} - {cfg['start_url']}")
            full_site_crawler(cfg['start_url'], site, max_pages=500)
            self.stdout.write(f"完成爬取: {cfg['name']}")

        self.stdout.write(self.style.SUCCESS("工人日报所有平台爬取完成"))
|
||||
53
core/management/commands/crawl_jjrb.py
Normal file
53
core/management/commands/crawl_jjrb.py
Normal file
@@ -0,0 +1,53 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl Economic Daily.

    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 经济日报及其子网站、客户端、新媒体平台"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['jjrb', 'mobile', 'all'],
                            help='选择爬取平台: jjrb(经济日报), mobile(移动端), all(全部)')

    def handle(self, *args, **options):
        platform = options['platform']

        # Per-platform crawl configuration. NOTE: 'mobile' is advertised in
        # the --platform choices but has no configuration entry yet.
        platforms = {
            'jjrb': {
                'name': '经济日报',
                'base_url': 'http://www.ce.cn',
                'start_url': 'http://www.ce.cn',
                'article_selector': 'a'
            },
        }

        if platform == 'all':
            target_platforms = platforms.values()
        elif platform in platforms:
            target_platforms = [platforms[platform]]
        else:
            # Bug fix: `--platform mobile` used to raise KeyError because the
            # 'mobile' choice has no entry in `platforms`; warn and skip instead.
            self.stdout.write(self.style.WARNING(f"平台 {platform} 暂无配置,跳过"))
            target_platforms = []

        for platform_config in target_platforms:
            website, created = Website.objects.get_or_create(
                name=platform_config['name'],
                defaults={
                    'base_url': platform_config['base_url'],
                    'article_list_url': platform_config['start_url'],
                    'article_selector': platform_config['article_selector']
                }
            )

            # Keep a pre-existing Website row in sync with this config.
            if not created:
                website.base_url = platform_config['base_url']
                website.article_list_url = platform_config['start_url']
                website.article_selector = platform_config['article_selector']
                website.save()

            self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
            full_site_crawler(platform_config['start_url'], website, max_pages=500)
            self.stdout.write(f"完成爬取: {platform_config['name']}")

        self.stdout.write(self.style.SUCCESS("经济日报所有平台爬取完成"))
|
||||
60
core/management/commands/crawl_kjrb.py
Normal file
60
core/management/commands/crawl_kjrb.py
Normal file
@@ -0,0 +1,60 @@
|
||||
### Not supported
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl Science and Technology Daily (e-paper and mobile).

    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 科技日报及其子网站、客户端、新媒体平台"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['kjrb', 'mobile', 'all'],
                            help='选择爬取平台: kjrb(科技日报), mobile(移动端), all(全部)')

    def handle(self, *args, **options):
        # Per-platform crawl configuration: display name, site root,
        # crawl entry URL, and the CSS selector for article links.
        configs = {
            'kjrb': {
                'name': '科技日报',
                'base_url': 'http://digitalpaper.stdaily.com',
                'start_url': 'http://digitalpaper.stdaily.com',
                'article_selector': 'a',
            },
            'mobile': {
                'name': '科技日报移动端',
                'base_url': 'http://m.stdaily.com',
                'start_url': 'http://m.stdaily.com',
                'article_selector': 'a',
            },
        }

        choice = options['platform']
        selected = list(configs.values()) if choice == 'all' else [configs[choice]]

        for cfg in selected:
            site, was_created = Website.objects.get_or_create(
                name=cfg['name'],
                defaults={
                    'base_url': cfg['base_url'],
                    'article_list_url': cfg['start_url'],
                    'article_selector': cfg['article_selector'],
                },
            )
            if not was_created:
                # Keep a pre-existing Website row in sync with this config.
                site.base_url = cfg['base_url']
                site.article_list_url = cfg['start_url']
                site.article_selector = cfg['article_selector']
                site.save()

            self.stdout.write(f"开始爬取: {cfg['name']} - {cfg['start_url']}")
            full_site_crawler(cfg['start_url'], site, max_pages=500)
            self.stdout.write(f"完成爬取: {cfg['name']}")

        self.stdout.write(self.style.SUCCESS("科技日报所有平台爬取完成"))
|
||||
59
core/management/commands/crawl_nmrb.py
Normal file
59
core/management/commands/crawl_nmrb.py
Normal file
@@ -0,0 +1,59 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl Farmers' Daily (desktop and mobile sites).

    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 农民日报及其子网站、客户端、新媒体平台"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['nmrb', 'mobile', 'all'],
                            help='选择爬取平台: nmrb(农民日报), mobile(移动端), all(全部)')

    def handle(self, *args, **options):
        # Per-platform crawl configuration: display name, site root,
        # crawl entry URL, and the CSS selector for article links.
        configs = {
            'nmrb': {
                'name': '农民日报',
                'base_url': 'http://www.farmer.com.cn',
                'start_url': 'http://www.farmer.com.cn',
                'article_selector': 'a',
            },
            'mobile': {
                'name': '农民日报移动端',
                'base_url': 'http://m.farmer.com.cn',
                'start_url': 'http://m.farmer.com.cn',
                'article_selector': 'a',
            },
        }

        choice = options['platform']
        selected = list(configs.values()) if choice == 'all' else [configs[choice]]

        for cfg in selected:
            site, was_created = Website.objects.get_or_create(
                name=cfg['name'],
                defaults={
                    'base_url': cfg['base_url'],
                    'article_list_url': cfg['start_url'],
                    'article_selector': cfg['article_selector'],
                },
            )
            if not was_created:
                # Keep a pre-existing Website row in sync with this config.
                site.base_url = cfg['base_url']
                site.article_list_url = cfg['start_url']
                site.article_selector = cfg['article_selector']
                site.save()

            self.stdout.write(f"开始爬取: {cfg['name']} - {cfg['start_url']}")
            full_site_crawler(cfg['start_url'], site, max_pages=500)
            self.stdout.write(f"完成爬取: {cfg['name']}")

        self.stdout.write(self.style.SUCCESS("农民日报所有平台爬取完成"))
|
||||
53
core/management/commands/crawl_pla.py
Normal file
53
core/management/commands/crawl_pla.py
Normal file
@@ -0,0 +1,53 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl PLA Daily.

    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 解放军报及其子网站、客户端、新媒体平台"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['pla', 'mobile', 'all'],
                            help='选择爬取平台: pla(解放军报), mobile(移动端), all(全部)')

    def handle(self, *args, **options):
        platform = options['platform']

        # Per-platform crawl configuration. NOTE: 'mobile' is advertised in
        # the --platform choices but has no configuration entry yet.
        platforms = {
            'pla': {
                'name': '解放军报',
                'base_url': 'https://www.81.cn',
                'start_url': 'https://www.81.cn',
                'article_selector': 'a'
            },
        }

        if platform == 'all':
            target_platforms = platforms.values()
        elif platform in platforms:
            target_platforms = [platforms[platform]]
        else:
            # Bug fix: `--platform mobile` used to raise KeyError because the
            # 'mobile' choice has no entry in `platforms`; warn and skip instead.
            self.stdout.write(self.style.WARNING(f"平台 {platform} 暂无配置,跳过"))
            target_platforms = []

        for platform_config in target_platforms:
            website, created = Website.objects.get_or_create(
                name=platform_config['name'],
                defaults={
                    'base_url': platform_config['base_url'],
                    'article_list_url': platform_config['start_url'],
                    'article_selector': platform_config['article_selector']
                }
            )

            # Keep a pre-existing Website row in sync with this config.
            if not created:
                website.base_url = platform_config['base_url']
                website.article_list_url = platform_config['start_url']
                website.article_selector = platform_config['article_selector']
                website.save()

            self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
            full_site_crawler(platform_config['start_url'], website, max_pages=500)
            self.stdout.write(f"完成爬取: {platform_config['name']}")

        self.stdout.write(self.style.SUCCESS("解放军报所有平台爬取完成"))
|
||||
59
core/management/commands/crawl_qiushi.py
Normal file
59
core/management/commands/crawl_qiushi.py
Normal file
@@ -0,0 +1,59 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl Qiushi Journal (web and mobile sites).

    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 求是杂志及其子网站、客户端、新媒体平台"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['qiushi', 'mobile', 'all'],
                            help='选择爬取平台: qiushi(求是网), mobile(移动端), all(全部)')

    def handle(self, *args, **options):
        # Per-platform crawl configuration: display name, site root,
        # crawl entry URL, and the CSS selector for article links.
        configs = {
            'qiushi': {
                'name': '求是网',
                'base_url': 'https://www.qstheory.cn',
                'start_url': 'https://www.qstheory.cn',
                'article_selector': 'a',
            },
            'mobile': {
                'name': '求是移动端',
                'base_url': 'http://m.qstheory.cn',
                'start_url': 'http://m.qstheory.cn',
                'article_selector': 'a',
            },
        }

        choice = options['platform']
        selected = list(configs.values()) if choice == 'all' else [configs[choice]]

        for cfg in selected:
            site, was_created = Website.objects.get_or_create(
                name=cfg['name'],
                defaults={
                    'base_url': cfg['base_url'],
                    'article_list_url': cfg['start_url'],
                    'article_selector': cfg['article_selector'],
                },
            )
            if not was_created:
                # Keep a pre-existing Website row in sync with this config.
                site.base_url = cfg['base_url']
                site.article_list_url = cfg['start_url']
                site.article_selector = cfg['article_selector']
                site.save()

            self.stdout.write(f"开始爬取: {cfg['name']} - {cfg['start_url']}")
            full_site_crawler(cfg['start_url'], site, max_pages=500)
            self.stdout.write(f"完成爬取: {cfg['name']}")

        self.stdout.write(self.style.SUCCESS("求是杂志所有平台爬取完成"))
|
||||
59
core/management/commands/crawl_qizhi.py
Normal file
59
core/management/commands/crawl_qizhi.py
Normal file
@@ -0,0 +1,59 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl qizhiwang.org.cn (desktop and mobile sites).

    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 旗帜网及其子网站、客户端、新媒体平台"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['qizhi', 'mobile', 'all'],
                            help='选择爬取平台: qizhi(旗帜网), mobile(移动端), all(全部)')

    def handle(self, *args, **options):
        # Per-platform crawl configuration: display name, site root, crawl
        # entry URL, and the CSS selector for article links. The selector is
        # restricted to root-relative anchors to better match article links.
        configs = {
            'qizhi': {
                'name': '旗帜网',
                'base_url': 'http://www.qizhiwang.org.cn',
                'start_url': 'http://www.qizhiwang.org.cn',
                'article_selector': 'a[href^="/"]',
            },
            'mobile': {
                'name': '旗帜网移动端',
                'base_url': 'http://m.qizhiwang.org.cn',
                'start_url': 'http://m.qizhiwang.org.cn',
                'article_selector': 'a[href^="/"]',
            },
        }

        choice = options['platform']
        selected = list(configs.values()) if choice == 'all' else [configs[choice]]

        for cfg in selected:
            site, was_created = Website.objects.get_or_create(
                name=cfg['name'],
                defaults={
                    'base_url': cfg['base_url'],
                    'article_list_url': cfg['start_url'],
                    'article_selector': cfg['article_selector'],
                },
            )
            if not was_created:
                # Keep a pre-existing Website row in sync with this config.
                site.base_url = cfg['base_url']
                site.article_list_url = cfg['start_url']
                site.article_selector = cfg['article_selector']
                site.save()

            self.stdout.write(f"开始爬取: {cfg['name']} - {cfg['start_url']}")
            full_site_crawler(cfg['start_url'], site, max_pages=500)
            self.stdout.write(f"完成爬取: {cfg['name']}")

        self.stdout.write(self.style.SUCCESS("旗帜网所有平台爬取完成"))
|
||||
@@ -4,23 +4,62 @@ from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Recursively crawl People's Daily platforms (app, portal, e-paper).

    Reconstructed post-change implementation: the source span was a diff hunk
    interleaving pre- and post-change lines and was not valid Python as-is.
    Each configured platform is mirrored into a ``Website`` row, then handed
    to ``full_site_crawler`` for a bounded full-site crawl.
    """

    help = "全站递归爬取 人民日报及其子网站、客户端、新媒体平台"

    def add_arguments(self, parser):
        parser.add_argument('--platform', type=str, default='all',
                            choices=['peopleapp', 'people', 'paper', 'all'],
                            help='选择爬取平台: peopleapp(客户端), people(人民网), paper(报纸), all(全部)')

    def handle(self, *args, **options):
        platform = options['platform']

        # Per-platform crawl configuration: display name, site root,
        # crawl entry URL, and the CSS selector for article links.
        platforms = {
            'peopleapp': {
                'name': '人民日报客户端',
                'base_url': 'https://www.peopleapp.com',
                'start_url': 'https://www.peopleapp.com/home',
                'article_selector': 'a'
            },
            'people': {
                'name': '人民网',
                'base_url': 'https://www.people.com.cn',
                'start_url': 'https://www.people.com.cn',
                'article_selector': 'a'
            },
            'paper': {
                'name': '人民日报报纸',
                'base_url': 'http://paper.people.com.cn',
                'start_url': 'http://paper.people.com.cn',
                'article_selector': 'a'
            }
        }

        if platform == 'all':
            target_platforms = platforms.values()
        else:
            target_platforms = [platforms[platform]]

        for platform_config in target_platforms:
            website, created = Website.objects.get_or_create(
                name=platform_config['name'],
                defaults={
                    'base_url': platform_config['base_url'],
                    'article_list_url': platform_config['start_url'],
                    'article_selector': platform_config['article_selector']
                }
            )

            # Keep a pre-existing Website row in sync with this config.
            if not created:
                website.base_url = platform_config['base_url']
                website.article_list_url = platform_config['start_url']
                website.article_selector = platform_config['article_selector']
                website.save()

            self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
            full_site_crawler(platform_config['start_url'], website, max_pages=500)
            self.stdout.write(f"完成爬取: {platform_config['name']}")
|
||||
|
||||
self.stdout.write(self.style.SUCCESS("人民日报所有平台爬取完成"))
|
||||
53
core/management/commands/crawl_rmzxb.py
Normal file
53
core/management/commands/crawl_rmzxb.py
Normal file
@@ -0,0 +1,53 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = "全站递归爬取 人民政协网及其子网站、客户端、新媒体平台"
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--platform', type=str, default='all',
|
||||
choices=['rmzxb', 'mobile', 'all'],
|
||||
help='选择爬取平台: rmzxb(人民政协网), mobile(移动端), all(全部)')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
platform = options['platform']
|
||||
|
||||
# 人民政协网各平台配置
|
||||
platforms = {
|
||||
'rmzxb': {
|
||||
'name': '人民政协网',
|
||||
'base_url': 'https://www.rmzxw.com.cn',
|
||||
'start_url': 'https://www.rmzxw.com.cn',
|
||||
'article_selector': 'a'
|
||||
},
|
||||
}
|
||||
|
||||
if platform == 'all':
|
||||
target_platforms = platforms.values()
|
||||
else:
|
||||
target_platforms = [platforms[platform]]
|
||||
|
||||
for platform_config in target_platforms:
|
||||
website, created = Website.objects.get_or_create(
|
||||
name=platform_config['name'],
|
||||
defaults={
|
||||
'base_url': platform_config['base_url'],
|
||||
'article_list_url': platform_config['start_url'],
|
||||
'article_selector': platform_config['article_selector']
|
||||
}
|
||||
)
|
||||
|
||||
# 确保更新已存在的网站对象的配置
|
||||
if not created:
|
||||
website.base_url = platform_config['base_url']
|
||||
website.article_list_url = platform_config['start_url']
|
||||
website.article_selector = platform_config['article_selector']
|
||||
website.save()
|
||||
|
||||
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
|
||||
full_site_crawler(platform_config['start_url'], website, max_pages=500)
|
||||
self.stdout.write(f"完成爬取: {platform_config['name']}")
|
||||
|
||||
self.stdout.write(self.style.SUCCESS("人民政协网所有平台爬取完成"))
|
||||
@@ -4,17 +4,62 @@ from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = "全站递归爬取 www.news.cn"
|
||||
help = "全站递归爬取 新华社及其子网站、客户端、新媒体平台"
|
||||
|
||||
def handle(self, *args, **kwargs):
|
||||
website, created = Website.objects.get_or_create(
|
||||
name="新华网",
|
||||
defaults={
|
||||
'article_list_url': 'https://www.news.cn/',
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--platform', type=str, default='all',
|
||||
choices=['news', 'xinhuanet', 'mobile', 'all'],
|
||||
help='选择爬取平台: news(新华网), xinhuanet(新华网主站), mobile(移动端), all(全部)')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
platform = options['platform']
|
||||
|
||||
# 新华社各平台配置
|
||||
platforms = {
|
||||
'news': {
|
||||
'name': '新华网',
|
||||
'base_url': 'https://www.news.cn',
|
||||
'start_url': 'https://www.news.cn',
|
||||
'article_selector': 'a'
|
||||
},
|
||||
'xinhuanet': {
|
||||
'name': '新华网主站',
|
||||
'base_url': 'https://www.xinhuanet.com',
|
||||
'start_url': 'https://www.xinhuanet.com',
|
||||
'article_selector': 'a'
|
||||
},
|
||||
'mobile': {
|
||||
'name': '新华社移动端',
|
||||
'base_url': 'https://m.xinhuanet.com',
|
||||
'start_url': 'https://m.xinhuanet.com',
|
||||
'article_selector': 'a'
|
||||
}
|
||||
)
|
||||
start_url = "https://www.news.cn/"
|
||||
self.stdout.write(f"开始全站爬取: {start_url}")
|
||||
full_site_crawler(start_url, website, max_pages=500)
|
||||
self.stdout.write("爬取完成")
|
||||
}
|
||||
|
||||
if platform == 'all':
|
||||
target_platforms = platforms.values()
|
||||
else:
|
||||
target_platforms = [platforms[platform]]
|
||||
|
||||
for platform_config in target_platforms:
|
||||
website, created = Website.objects.get_or_create(
|
||||
name=platform_config['name'],
|
||||
defaults={
|
||||
'base_url': platform_config['base_url'],
|
||||
'article_list_url': platform_config['start_url'],
|
||||
'article_selector': platform_config['article_selector']
|
||||
}
|
||||
)
|
||||
|
||||
# 确保更新已存在的网站对象的配置
|
||||
if not created:
|
||||
website.base_url = platform_config['base_url']
|
||||
website.article_list_url = platform_config['start_url']
|
||||
website.article_selector = platform_config['article_selector']
|
||||
website.save()
|
||||
|
||||
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
|
||||
full_site_crawler(platform_config['start_url'], website, max_pages=500)
|
||||
self.stdout.write(f"完成爬取: {platform_config['name']}")
|
||||
|
||||
self.stdout.write(self.style.SUCCESS("新华社所有平台爬取完成"))
|
||||
|
||||
65
core/management/commands/crawl_xuexi.py
Normal file
65
core/management/commands/crawl_xuexi.py
Normal file
@@ -0,0 +1,65 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = "全站递归爬取 学习强国中央媒体学习号及省级以上学习平台"
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--platform', type=str, default='all',
|
||||
choices=['xuexi', 'central', 'provincial', 'all'],
|
||||
help='选择爬取平台: xuexi(学习强国主站), central(中央媒体), provincial(省级平台), all(全部)')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
platform = options['platform']
|
||||
|
||||
# 学习强国各平台配置
|
||||
platforms = {
|
||||
'xuexi': {
|
||||
'name': '学习强国',
|
||||
'base_url': 'https://www.xuexi.cn',
|
||||
'start_url': 'https://www.xuexi.cn',
|
||||
'article_selector': 'a'
|
||||
},
|
||||
'central': {
|
||||
'name': '学习强国中央媒体',
|
||||
'base_url': 'https://www.xuexi.cn',
|
||||
'start_url': 'https://www.xuexi.cn/central',
|
||||
'article_selector': 'a'
|
||||
},
|
||||
'provincial': {
|
||||
'name': '学习强国省级平台',
|
||||
'base_url': 'https://www.xuexi.cn',
|
||||
'start_url': 'https://www.xuexi.cn/provincial',
|
||||
'article_selector': 'a'
|
||||
}
|
||||
}
|
||||
|
||||
if platform == 'all':
|
||||
target_platforms = platforms.values()
|
||||
else:
|
||||
target_platforms = [platforms[platform]]
|
||||
|
||||
for platform_config in target_platforms:
|
||||
website, created = Website.objects.get_or_create(
|
||||
name=platform_config['name'],
|
||||
defaults={
|
||||
'base_url': platform_config['base_url'],
|
||||
'article_list_url': platform_config['start_url'],
|
||||
'article_selector': platform_config['article_selector']
|
||||
}
|
||||
)
|
||||
|
||||
# 确保更新已存在的网站对象的配置
|
||||
if not created:
|
||||
website.base_url = platform_config['base_url']
|
||||
website.article_list_url = platform_config['start_url']
|
||||
website.article_selector = platform_config['article_selector']
|
||||
website.save()
|
||||
|
||||
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
|
||||
full_site_crawler(platform_config['start_url'], website, max_pages=500)
|
||||
self.stdout.write(f"完成爬取: {platform_config['name']}")
|
||||
|
||||
self.stdout.write(self.style.SUCCESS("学习强国所有平台爬取完成"))
|
||||
59
core/management/commands/crawl_xxsb.py
Normal file
59
core/management/commands/crawl_xxsb.py
Normal file
@@ -0,0 +1,59 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = "全站递归爬取 学习时报及其子网站、客户端、新媒体平台"
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--platform', type=str, default='all',
|
||||
choices=['xxsb', 'mobile', 'all'],
|
||||
help='选择爬取平台: xxsb(学习时报), mobile(移动端), all(全部)')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
platform = options['platform']
|
||||
|
||||
# 学习时报各平台配置
|
||||
platforms = {
|
||||
'xxsb': {
|
||||
'name': '学习时报',
|
||||
'base_url': 'http://www.studytimes.cn',
|
||||
'start_url': 'http://www.studytimes.cn',
|
||||
'article_selector': 'a'
|
||||
},
|
||||
'mobile': {
|
||||
'name': '学习时报移动端',
|
||||
'base_url': 'http://m.studytimes.cn',
|
||||
'start_url': 'http://m.studytimes.cn',
|
||||
'article_selector': 'a'
|
||||
}
|
||||
}
|
||||
|
||||
if platform == 'all':
|
||||
target_platforms = platforms.values()
|
||||
else:
|
||||
target_platforms = [platforms[platform]]
|
||||
|
||||
for platform_config in target_platforms:
|
||||
website, created = Website.objects.get_or_create(
|
||||
name=platform_config['name'],
|
||||
defaults={
|
||||
'base_url': platform_config['base_url'],
|
||||
'article_list_url': platform_config['start_url'],
|
||||
'article_selector': platform_config['article_selector']
|
||||
}
|
||||
)
|
||||
|
||||
# 确保更新已存在的网站对象的配置
|
||||
if not created:
|
||||
website.base_url = platform_config['base_url']
|
||||
website.article_list_url = platform_config['start_url']
|
||||
website.article_selector = platform_config['article_selector']
|
||||
website.save()
|
||||
|
||||
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
|
||||
full_site_crawler(platform_config['start_url'], website, max_pages=500)
|
||||
self.stdout.write(f"完成爬取: {platform_config['name']}")
|
||||
|
||||
self.stdout.write(self.style.SUCCESS("学习时报所有平台爬取完成"))
|
||||
59
core/management/commands/crawl_zgfnb.py
Normal file
59
core/management/commands/crawl_zgfnb.py
Normal file
@@ -0,0 +1,59 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = "全站递归爬取 中国妇女报及其子网站、客户端、新媒体平台"
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--platform', type=str, default='all',
|
||||
choices=['zgfnb', 'mobile', 'all'],
|
||||
help='选择爬取平台: zgfnb(中国妇女报), mobile(移动端), all(全部)')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
platform = options['platform']
|
||||
|
||||
# 中国妇女报各平台配置
|
||||
platforms = {
|
||||
'zgfnb': {
|
||||
'name': '中国妇女报',
|
||||
'base_url': 'http://www.cnwomen.com.cn',
|
||||
'start_url': 'http://www.cnwomen.com.cn',
|
||||
'article_selector': 'a'
|
||||
},
|
||||
'mobile': {
|
||||
'name': '中国妇女报移动端',
|
||||
'base_url': 'http://m.cnwomen.com.cn',
|
||||
'start_url': 'http://m.cnwomen.com.cn',
|
||||
'article_selector': 'a'
|
||||
}
|
||||
}
|
||||
|
||||
if platform == 'all':
|
||||
target_platforms = platforms.values()
|
||||
else:
|
||||
target_platforms = [platforms[platform]]
|
||||
|
||||
for platform_config in target_platforms:
|
||||
website, created = Website.objects.get_or_create(
|
||||
name=platform_config['name'],
|
||||
defaults={
|
||||
'base_url': platform_config['base_url'],
|
||||
'article_list_url': platform_config['start_url'],
|
||||
'article_selector': platform_config['article_selector']
|
||||
}
|
||||
)
|
||||
|
||||
# 确保更新已存在的网站对象的配置
|
||||
if not created:
|
||||
website.base_url = platform_config['base_url']
|
||||
website.article_list_url = platform_config['start_url']
|
||||
website.article_selector = platform_config['article_selector']
|
||||
website.save()
|
||||
|
||||
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
|
||||
full_site_crawler(platform_config['start_url'], website, max_pages=500)
|
||||
self.stdout.write(f"完成爬取: {platform_config['name']}")
|
||||
|
||||
self.stdout.write(self.style.SUCCESS("中国妇女报所有平台爬取完成"))
|
||||
59
core/management/commands/crawl_zgjwjc.py
Normal file
59
core/management/commands/crawl_zgjwjc.py
Normal file
@@ -0,0 +1,59 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = "全站递归爬取 中国纪检监察报及其子网站、客户端、新媒体平台"
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--platform', type=str, default='all',
|
||||
choices=['zgjwjc', 'mobile', 'all'],
|
||||
help='选择爬取平台: zgjwjc(中国纪检监察报), mobile(移动端), all(全部)')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
platform = options['platform']
|
||||
|
||||
# 中国纪检监察报各平台配置
|
||||
platforms = {
|
||||
'zgjwjc': {
|
||||
'name': '中国纪检监察报',
|
||||
'base_url': 'http://www.jjjcb.cn',
|
||||
'start_url': 'http://www.jjjcb.cn',
|
||||
'article_selector': 'a'
|
||||
},
|
||||
'mobile': {
|
||||
'name': '中国纪检监察报移动端',
|
||||
'base_url': 'http://m.jjjcb.cn',
|
||||
'start_url': 'http://m.jjjcb.cn',
|
||||
'article_selector': 'a'
|
||||
}
|
||||
}
|
||||
|
||||
if platform == 'all':
|
||||
target_platforms = platforms.values()
|
||||
else:
|
||||
target_platforms = [platforms[platform]]
|
||||
|
||||
for platform_config in target_platforms:
|
||||
website, created = Website.objects.get_or_create(
|
||||
name=platform_config['name'],
|
||||
defaults={
|
||||
'base_url': platform_config['base_url'],
|
||||
'article_list_url': platform_config['start_url'],
|
||||
'article_selector': platform_config['article_selector']
|
||||
}
|
||||
)
|
||||
|
||||
# 确保更新已存在的网站对象的配置
|
||||
if not created:
|
||||
website.base_url = platform_config['base_url']
|
||||
website.article_list_url = platform_config['start_url']
|
||||
website.article_selector = platform_config['article_selector']
|
||||
website.save()
|
||||
|
||||
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
|
||||
full_site_crawler(platform_config['start_url'], website, max_pages=500)
|
||||
self.stdout.write(f"完成爬取: {platform_config['name']}")
|
||||
|
||||
self.stdout.write(self.style.SUCCESS("中国纪检监察报所有平台爬取完成"))
|
||||
59
core/management/commands/crawl_zgqnb.py
Normal file
59
core/management/commands/crawl_zgqnb.py
Normal file
@@ -0,0 +1,59 @@
|
||||
from django.core.management.base import BaseCommand
|
||||
from core.models import Website
|
||||
from core.utils import full_site_crawler
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = "全站递归爬取 中国青年报及其子网站、客户端、新媒体平台"
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument('--platform', type=str, default='all',
|
||||
choices=['zgqnb', 'mobile', 'all'],
|
||||
help='选择爬取平台: zgqnb(中国青年报), mobile(移动端), all(全部)')
|
||||
|
||||
def handle(self, *args, **options):
|
||||
platform = options['platform']
|
||||
|
||||
# 中国青年报各平台配置
|
||||
platforms = {
|
||||
'zgqnb': {
|
||||
'name': '中国青年报',
|
||||
'base_url': 'https://www.cyol.com',
|
||||
'start_url': 'https://www.cyol.com',
|
||||
'article_selector': 'a'
|
||||
},
|
||||
'mobile': {
|
||||
'name': '中国青年报移动端',
|
||||
'base_url': 'https://m.cyol.com',
|
||||
'start_url': 'https://m.cyol.com',
|
||||
'article_selector': 'a'
|
||||
}
|
||||
}
|
||||
|
||||
if platform == 'all':
|
||||
target_platforms = platforms.values()
|
||||
else:
|
||||
target_platforms = [platforms[platform]]
|
||||
|
||||
for platform_config in target_platforms:
|
||||
website, created = Website.objects.get_or_create(
|
||||
name=platform_config['name'],
|
||||
defaults={
|
||||
'base_url': platform_config['base_url'],
|
||||
'article_list_url': platform_config['start_url'],
|
||||
'article_selector': platform_config['article_selector']
|
||||
}
|
||||
)
|
||||
|
||||
# 确保更新已存在的网站对象的配置
|
||||
if not created:
|
||||
website.base_url = platform_config['base_url']
|
||||
website.article_list_url = platform_config['start_url']
|
||||
website.article_selector = platform_config['article_selector']
|
||||
website.save()
|
||||
|
||||
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
|
||||
full_site_crawler(platform_config['start_url'], website, max_pages=500)
|
||||
self.stdout.write(f"完成爬取: {platform_config['name']}")
|
||||
|
||||
self.stdout.write(self.style.SUCCESS("中国青年报所有平台爬取完成"))
|
||||
Reference in New Issue
Block a user