Add support for all platforms

This commit is contained in:
2025-08-14 23:42:16 +08:00
parent 4994310f14
commit ac98ac0057
23 changed files with 267 additions and 215 deletions

View File

@@ -8,17 +8,17 @@ class Command(BaseCommand):
def add_arguments(self, parser):
    """Register CLI options: --media (comma-separated media keys) and --platform."""
    parser.add_argument('--media', type=str, help='指定要爬取的媒体,用逗号分隔')
    # Register --platform exactly once; adding the same option string twice
    # raises argparse.ArgumentError when the command is loaded.
    parser.add_argument('--platform', type=str, default='all',
                        help='指定平台类型: all(全部), web(网站), mobile(移动端)')
def handle(self, *args, **options):
media_list = options['media']
platform = options['platform']
# 所有中央主流媒体配置
all_media = {
'rmrb': 'crawl_rmrb',
'xinhua': 'crawl_xinhua',
'xinhua': 'crawl_xinhua',
'cctv': 'crawl_cctv',
'qiushi': 'crawl_qiushi',
'pla': 'crawl_pla',
@@ -39,15 +39,15 @@ class Command(BaseCommand):
'qizhi': 'crawl_qizhi',
'china': 'crawl_china'
}
# 如果指定了特定媒体,则只爬取指定的媒体
if media_list:
target_media = [media.strip() for media in media_list.split(',')]
else:
target_media = list(all_media.keys())
self.stdout.write(f"开始批量爬取 {len(target_media)} 家中央主流媒体...")
for media in target_media:
if media in all_media:
command_name = all_media[media]
@@ -59,17 +59,17 @@ class Command(BaseCommand):
self.stdout.write(self.style.ERROR(f"爬取 {media} 失败: {e}"))
else:
self.stdout.write(self.style.WARNING(f"未知媒体: {media}"))
self.stdout.write(self.style.SUCCESS("所有中央主流媒体爬取完成"))
# 显示统计信息
total_websites = Website.objects.count()
total_articles = sum([website.article_set.count() for website in Website.objects.all()])
self.stdout.write(f"统计信息:")
self.stdout.write(f"- 总网站数: {total_websites}")
self.stdout.write(f"- 总文章数: {total_articles}")
# 显示各媒体文章数量
self.stdout.write(f"各媒体文章数量:")
for website in Website.objects.all():

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 中央广播电视总台及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for CCTV platform selection."""
    # Register --platform exactly once; a duplicate registration for the same
    # option string raises argparse.ArgumentError at command load time.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['cctv', 'cctvnews', 'mobile', 'all'],
                        help='选择爬取平台: cctv(央视网), cctvnews(央视新闻), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 中央广播电视总台各平台配置
platforms = {
'cctv': {
@@ -35,12 +35,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -50,16 +50,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("中央广播电视总台所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 中国网主网及中国网一省份,不转发二级子网站"
def add_arguments(self, parser):
    """Register the --platform option for china.com.cn platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['china', 'province', 'all'],
                        help='选择爬取平台: china(中国网主网), province(中国网一省份), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 中国网各平台配置
platforms = {
'china': {
@@ -29,12 +29,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -44,16 +44,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("中国网所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 中国日报及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for China Daily platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['chinadaily', 'mobile', 'all'],
                        help='选择爬取平台: chinadaily(中国日报), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 中国日报各平台配置
platforms = {
'chinadaily': {
@@ -29,12 +29,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -44,16 +44,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("中国日报所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 中国新闻社及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for China News Service platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['chinanews', 'mobile', 'all'],
                        help='选择爬取平台: chinanews(中国新闻社), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 中国新闻社各平台配置
platforms = {
'chinanews': {
@@ -29,12 +29,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -44,16 +44,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("中国新闻社所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 法治日报及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for Legal Daily platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['fzrb', 'mobile', 'all'],
                        help='选择爬取平台: fzrb(法治日报), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 法治日报各平台配置
platforms = {
'fzrb': {
@@ -29,12 +29,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -44,16 +44,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("法治日报所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 光明日报及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for Guangming Daily platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['gmrb', 'mobile', 'all'],
                        help='选择爬取平台: gmrb(光明日报), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 光明日报各平台配置
platforms = {
'gmrb': {
@@ -29,12 +29,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -44,16 +44,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("光明日报所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 工人日报及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for Workers' Daily platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['grrb', 'mobile', 'all'],
                        help='选择爬取平台: grrb(工人日报), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 工人日报各平台配置
platforms = {
'grrb': {
@@ -29,12 +29,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -44,16 +44,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("工人日报所有平台爬取完成"))
self.stdout.write(self.style.SUCCESS("工人日报所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 经济日报及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for Economic Daily platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['jjrb', 'mobile', 'all'],
                        help='选择爬取平台: jjrb(经济日报), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 经济日报各平台配置
platforms = {
'jjrb': {
@@ -23,12 +23,12 @@ class Command(BaseCommand):
'article_selector': 'a'
},
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -38,16 +38,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("经济日报所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 农民日报及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for Farmers' Daily platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['nmrb', 'mobile', 'all'],
                        help='选择爬取平台: nmrb(农民日报), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 农民日报各平台配置
platforms = {
'nmrb': {
@@ -29,12 +29,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -44,16 +44,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("农民日报所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 解放军报及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for PLA Daily platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['pla', 'mobile', 'all'],
                        help='选择爬取平台: pla(解放军报), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 解放军报各平台配置
platforms = {
'pla': {
@@ -23,12 +23,12 @@ class Command(BaseCommand):
'article_selector': 'a'
},
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -38,16 +38,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("解放军报所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 求是杂志及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for Qiushi platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['qiushi', 'mobile', 'all'],
                        help='选择爬取平台: qiushi(求是网), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 求是杂志各平台配置
platforms = {
'qiushi': {
@@ -29,12 +29,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -44,16 +44,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("求是杂志所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 旗帜网及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for Qizhi platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['qizhi', 'mobile', 'all'],
                        help='选择爬取平台: qizhi(旗帜网), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 旗帜网各平台配置
platforms = {
'qizhi': {
@@ -29,12 +29,12 @@ class Command(BaseCommand):
'article_selector': 'a[href^="/"]' # 修改选择器以更好地匹配文章链接
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -44,16 +44,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("旗帜网所有平台爬取完成"))
self.stdout.write(self.style.SUCCESS("旗帜网所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 人民日报及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for People's Daily platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['peopleapp', 'people', 'paper', 'all'],
                        help='选择爬取平台: peopleapp(客户端), people(人民网), paper(报纸), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 人民日报各平台配置
platforms = {
'peopleapp': {
@@ -35,12 +35,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -50,16 +50,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("人民日报所有平台爬取完成"))
self.stdout.write(self.style.SUCCESS("人民日报所有平台爬取完成"))

View File

@@ -50,4 +50,4 @@ class Command(BaseCommand):
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("人民政协网所有平台爬取完成"))
self.stdout.write(self.style.SUCCESS("人民政协网所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 新华社及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for Xinhua platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['news', 'xinhuanet', 'mobile', 'all'],
                        help='选择爬取平台: news(新华网), xinhuanet(新华网主站), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 新华社各平台配置
platforms = {
'news': {
@@ -35,12 +35,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -50,16 +50,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("新华社所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 学习强国中央媒体学习号及省级以上学习平台"
def add_arguments(self, parser):
    """Register the --platform option for Xuexi Qiangguo platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['xuexi', 'central', 'provincial', 'all'],
                        help='选择爬取平台: xuexi(学习强国主站), central(中央媒体), provincial(省级平台), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 学习强国各平台配置
platforms = {
'xuexi': {
@@ -35,12 +35,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -50,16 +50,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("学习强国所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 学习时报及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for Study Times platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['xxsb', 'mobile', 'all'],
                        help='选择爬取平台: xxsb(学习时报), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 学习时报各平台配置
platforms = {
'xxsb': {
@@ -29,12 +29,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -44,16 +44,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("学习时报所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 中国妇女报及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for China Women's News platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['zgfnb', 'mobile', 'all'],
                        help='选择爬取平台: zgfnb(中国妇女报), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 中国妇女报各平台配置
platforms = {
'zgfnb': {
@@ -29,12 +29,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -44,16 +44,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("中国妇女报所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 中国纪检监察报及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for discipline-inspection daily platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['zgjwjc', 'mobile', 'all'],
                        help='选择爬取平台: zgjwjc(中国纪检监察报), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 中国纪检监察报各平台配置
platforms = {
'zgjwjc': {
@@ -29,12 +29,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -44,16 +44,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("中国纪检监察报所有平台爬取完成"))

View File

@@ -7,13 +7,13 @@ class Command(BaseCommand):
help = "全站递归爬取 中国青年报及其子网站、客户端、新媒体平台"
def add_arguments(self, parser):
    """Register the --platform option for China Youth Daily platform selection."""
    # Single registration only; duplicates raise argparse.ArgumentError.
    parser.add_argument('--platform', type=str, default='all',
                        choices=['zgqnb', 'mobile', 'all'],
                        help='选择爬取平台: zgqnb(中国青年报), mobile(移动端), all(全部)')
def handle(self, *args, **options):
platform = options['platform']
# 中国青年报各平台配置
platforms = {
'zgqnb': {
@@ -29,12 +29,12 @@ class Command(BaseCommand):
'article_selector': 'a'
}
}
if platform == 'all':
target_platforms = platforms.values()
else:
target_platforms = [platforms[platform]]
for platform_config in target_platforms:
website, created = Website.objects.get_or_create(
name=platform_config['name'],
@@ -44,16 +44,16 @@ class Command(BaseCommand):
'article_selector': platform_config['article_selector']
}
)
# 确保更新已存在的网站对象的配置
if not created:
website.base_url = platform_config['base_url']
website.article_list_url = platform_config['start_url']
website.article_selector = platform_config['article_selector']
website.save()
self.stdout.write(f"开始爬取: {platform_config['name']} - {platform_config['start_url']}")
full_site_crawler(platform_config['start_url'], website, max_pages=500)
self.stdout.write(f"完成爬取: {platform_config['name']}")
self.stdout.write(self.style.SUCCESS("中国青年报所有平台爬取完成"))