Support the first use case: 1. Add filters on the website; 2. Add export-all to the admin
@@ -297,6 +297,77 @@ class ArticleAdmin(admin.ModelAdmin):
        }),
    )

    # Admin action for exporting the selected articles
    actions = ['export_selected_articles']

    def export_selected_articles(self, request, queryset):
        """
        Export the selected articles as a ZIP file.
        """
        import os
        import re
        import zipfile
        from io import BytesIO

        from bs4 import BeautifulSoup
        from django.conf import settings
        from django.http import HttpResponse
        from docx import Document

        # Build the ZIP file in memory
        zip_buffer = BytesIO()

        with zipfile.ZipFile(zip_buffer, 'w') as zip_file:
            # Create a folder per article and add its content
            for article in queryset:
                # Folder name for the article; replace characters that are
                # invalid in file names
                safe_title = re.sub(r'[\\/:*?"<>|]', '_', article.title)
                article_folder = f"article_{article.id}_{safe_title}"

                # Create the Word document
                doc = Document()
                doc.add_heading(article.title, 0)

                # Add the article metadata
                doc.add_paragraph(f"Website: {article.website.name if article.website else ''}")
                doc.add_paragraph(f"URL: {article.url}")
                doc.add_paragraph(f"Published: {article.pub_date.strftime('%Y-%m-%d %H:%M:%S') if article.pub_date else ''}")
                doc.add_paragraph(f"Created: {article.created_at.strftime('%Y-%m-%d %H:%M:%S') if article.created_at else ''}")

                # Content heading
                doc.add_heading('Content:', level=1)

                # Extract plain text from the HTML content
                soup = BeautifulSoup(article.content, 'html.parser')
                content_text = soup.get_text()
                doc.add_paragraph(content_text)

                # Save the Word document to an in-memory buffer
                doc_buffer = BytesIO()
                doc.save(doc_buffer)
                doc_buffer.seek(0)

                # Add the Word document to the ZIP file
                zip_file.writestr(os.path.join(article_folder, f'{safe_title}.docx'), doc_buffer.getvalue())

                # Add the article's media files to the ZIP file
                if article.media_files:
                    for media_file in article.media_files:
                        try:
                            full_path = os.path.join(settings.MEDIA_ROOT, media_file)
                            if os.path.exists(full_path):
                                zip_file.write(full_path, os.path.join(article_folder, 'media', os.path.basename(media_file)))
                        except Exception:
                            # If a media file cannot be added, skip it and
                            # continue with the remaining files
                            pass

        # Serve the ZIP file as a download
        zip_buffer.seek(0)
        response = HttpResponse(zip_buffer.getvalue(), content_type='application/zip')
        response['Content-Disposition'] = 'attachment; filename=selected_articles.zip'

        return response

    export_selected_articles.short_description = "Export selected articles as ZIP"

    def content_preview(self, obj):
        """Content preview"""
        return obj.content[:100] + '...' if len(obj.content) > 100 else obj.content

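A quick way to exercise the new action outside the admin UI is to call it directly on the ModelAdmin, e.g. from a Django shell or test. A minimal sketch, assuming the app is importable as `crawler` (the module paths here are illustrative, not part of this commit):

    # Sketch only: module paths are assumptions, not part of this commit.
    from django.contrib import admin
    from django.test import RequestFactory

    from crawler.models import Article      # assumed app label
    from crawler.admin import ArticleAdmin  # assumed module path

    request = RequestFactory().get('/admin/')  # stand-in for an admin request
    model_admin = ArticleAdmin(Article, admin.site)
    response = model_admin.export_selected_articles(request, Article.objects.all()[:5])
    assert response['Content-Type'] == 'application/zip'

Note that the ZIP is assembled in a BytesIO buffer, so memory use grows with the size of the selected articles and their media; that is fine for typical admin-sized selections.
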
@@ -340,44 +411,69 @@ class ArticleAdmin(admin.ModelAdmin):
    actions_column.short_description = 'Actions'


class CrawlerStatusAdmin(admin.ModelAdmin):
    """Crawler status admin"""
    change_list_template = 'admin/crawler_status.html'

    def changelist_view(self, request, extra_context=None):
        """Crawler status view"""
        # Fetch the status of the distributed crawler nodes
        nodes = distributed_crawler.get_available_nodes()
        node_statuses = []

        for node_id in nodes:
            status = distributed_crawler.get_node_status(node_id)
            node_statuses.append(status)

        # Fetch the ten most recent batches
        batches = distributed_crawler.get_all_batches()[:10]

        # Aggregate task statistics; 'active_tasks' counts the nodes that
        # currently have running tasks, not the tasks themselves
        task_stats = {
            'active_tasks': len([n for n in node_statuses if n['active_tasks'] > 0]),
            'total_nodes': len(nodes),
            'total_batches': len(batches),
        }

        extra_context = extra_context or {}
        extra_context.update({
            'nodes': node_statuses,
            'batches': batches,
            'task_stats': task_stats,
        })

        return super().changelist_view(request, extra_context)


# Register the admin classes
admin.site.register(Website, WebsiteAdmin)
admin.site.register(Article, ArticleAdmin)


# Hide the Celery Results admin pages by undoing the automatic
# registration performed by the django_celery_results app
try:
    from django_celery_results.models import TaskResult, GroupResult
    admin.site.unregister(TaskResult)
    admin.site.unregister(GroupResult)
except Exception:
    # App not installed or models not registered
    pass

# Hide the Celery Beat periodic-task admin pages by undoing the automatic
# registration performed by the django_celery_beat app
try:
    from django_celery_beat.models import PeriodicTask, ClockedSchedule, CrontabSchedule, SolarSchedule, IntervalSchedule
    admin.site.unregister(PeriodicTask)
    admin.site.unregister(ClockedSchedule)
    admin.site.unregister(CrontabSchedule)
    admin.site.unregister(SolarSchedule)
    admin.site.unregister(IntervalSchedule)
except Exception:
    # App not installed or models not registered
    pass


# Customize the admin site titles
admin.site.site_header = 'Green Classroom Administration'
admin.site.site_title = 'Green Classroom'
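The `changelist_view` above relies on `distributed_crawler` exposing three calls: `get_available_nodes()`, `get_node_status(node_id)` returning a mapping with at least an `active_tasks` count, and `get_all_batches()`. A minimal stub with that shape, handy for rendering the `admin/crawler_status.html` template without a live crawler cluster (the stub and its field values are illustrative, not part of this commit):

    # Illustrative stub of the interface changelist_view depends on.
    class StubDistributedCrawler:
        def get_available_nodes(self):
            return ['node-1', 'node-2']

        def get_node_status(self, node_id):
            # 'active_tasks' is the only key changelist_view reads per node.
            return {'node_id': node_id, 'active_tasks': 0}

        def get_all_batches(self):
            return []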