Build a One-Click Webpage Image Downloader with Python + wxPython (Complete Source Code Included)
Author: winfredzhang
This article walks through building a one-click webpage image downloader with Python and wxPython. The sample code is explained in detail; if that sounds useful, read on.
I recently wrote a handy little desktop tool for myself (and for anyone else who wants it): paste any web page URL and it downloads every image on that page with one click, optionally bundles them into a PDF, avoids file-name collisions, and lets you move files from a right-click menu. In my testing it handles:
- Long WeChat Official Account articles (my most common use case)
- Zhihu answers, Juejin posts, CSDN blogs
- Weibo, wallpaper sites, and image sites such as Unsplash and Pinterest
- Almost any modern page (lazy-loaded images and CSS background-image are both supported)
Below is the tool's complete source code with notes on the core logic, so you can use it as-is or build on it.
Final result screenshot (tested on Windows 11)
(Screenshot omitted; run the tool to see it for yourself.)
Complete runnable code (duplicate-name protection, PDF generation, and right-click move built in)
# -*- coding: utf-8 -*-
import wx
import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
import threading
import re
from datetime import datetime
import fitz  # PyMuPDF -> pip install pymupdf


class UniversalImageDownloader(wx.Frame):
    def __init__(self):
        super().__init__(None, title="Universal Web Image Downloader (WeChat / Zhihu / Weibo and more)", size=(1150, 780))
        self.target_folder = ""
        self.downloaded_files = []  # paths of successfully downloaded images, used for the PDF
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        })
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(main_sizer)
        self.init_ui(main_sizer)
        self.Centre()
    def init_ui(self, main_sizer):
        panel = wx.Panel(self)
        vbox = wx.BoxSizer(wx.VERTICAL)

        # URL input
        vbox.Add(wx.StaticText(panel, label="Works with almost any web page (WeChat articles work best):"), 0, wx.ALL, 10)
        self.url_text = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER)
        self.url_text.SetHint("Paste a link, then press Enter or click the button")
        self.url_text.Bind(wx.EVT_TEXT_ENTER, self.on_start)  # let Enter start the download, as the hint promises
        vbox.Add(self.url_text, 0, wx.EXPAND | wx.ALL, 10)

        # Buttons
        btn_box = wx.BoxSizer(wx.HORIZONTAL)
        btn_folder = wx.Button(panel, label="1. Choose save folder")
        btn_folder.Bind(wx.EVT_BUTTON, self.on_choose_folder)
        self.btn_start = wx.Button(panel, label="2. Download all images")
        self.btn_start.Bind(wx.EVT_BUTTON, self.on_start)
        btn_box.Add(btn_folder, 1, wx.ALL, 5)
        btn_box.Add(self.btn_start, 1, wx.ALL, 5)
        vbox.Add(btn_box, 0, wx.EXPAND | wx.ALL, 5)

        # Options
        opt_box = wx.BoxSizer(wx.HORIZONTAL)
        self.chk_pdf = wx.CheckBox(panel, label="Generate a PDF after downloading (recommended)")
        self.chk_pdf.SetValue(True)
        self.btn_open = wx.Button(panel, label="Open folder")
        self.btn_open.Bind(wx.EVT_BUTTON, lambda e: os.startfile(self.target_folder) if self.target_folder else None)
        opt_box.Add(self.chk_pdf, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 10)
        opt_box.AddStretchSpacer()
        opt_box.Add(self.btn_open, 0, wx.ALL, 10)
        vbox.Add(opt_box, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, 20)

        # Status line
        self.status = wx.StaticText(panel, label="Ready")
        vbox.Add(self.status, 0, wx.ALL, 10)

        # Progress bar
        self.gauge = wx.Gauge(panel, range=100)
        vbox.Add(self.gauge, 0, wx.EXPAND | wx.ALL, 10)

        # Download list with right-click support
        self.list_ctrl = wx.ListCtrl(panel, style=wx.LC_REPORT)
        self.list_ctrl.InsertColumn(0, "File name", width=550)
        self.list_ctrl.InsertColumn(1, "Size", width=100)
        self.list_ctrl.InsertColumn(2, "Status", width=80)
        self.list_ctrl.Bind(wx.EVT_RIGHT_DOWN, self.on_right_click)
        vbox.Add(self.list_ctrl, 1, wx.EXPAND | wx.ALL, 10)

        panel.SetSizer(vbox)
        main_sizer.Add(panel, 1, wx.EXPAND)

        # Context menu; bind the handler to the specific menu item
        self.menu = wx.Menu()
        move_item = self.menu.Append(wx.ID_ANY, "Move selected images to...")
        self.Bind(wx.EVT_MENU, self.on_move, move_item)
    # ==================== Core functionality ====================
    def get_unique_filename(self, fullpath):
        """The all-important anti-duplicate-name helper."""
        if not os.path.exists(fullpath):
            return fullpath
        dir_name = os.path.dirname(fullpath)
        name, ext = os.path.splitext(os.path.basename(fullpath))
        i = 1
        while True:
            new_name = f"{name} ({i}){ext}"
            new_path = os.path.join(dir_name, new_name)
            if not os.path.exists(new_path):
                return new_path
            i += 1
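    # Example of the helper's behavior (illustrative): if "photo.jpg" already exists in the
    # target folder, successive calls return "photo (1).jpg", then "photo (2).jpg", and so on.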
    def download_all_images(self, base_url):
        # Key step: send the page URL as Referer to get past hotlink protection
        self.session.headers['Referer'] = base_url

        # 1. Fetch the page
        try:
            r = self.session.get(base_url, timeout=30)
            r.raise_for_status()
        except Exception as e:
            self.log(f"Failed to open the page: {e}")
            wx.CallAfter(self.finish)
            return
        soup = BeautifulSoup(r.text, 'html.parser')
        urls = set()

        # 2. Collect every plausible image URL (covers the common lazy-loading attributes)
        lazy_attrs = ['src', 'data-src', 'data-original', 'data-lazy', 'data-ks-lazyload', 'data-echo', 'file', 'src2']
        for tag in soup.find_all('img'):
            for attr in lazy_attrs:
                if tag.get(attr):
                    urls.add(tag[attr].strip())

        # 3. Extract background-image URLs (many gallery and wallpaper sites use these)
        pattern = re.compile(r'url\(["\']?([^"\')]+)["\']?\)', re.I)
        for tag in soup.find_all(style=True):
            for u in pattern.findall(tag['style']):
                urls.add(u.strip())

        # Filter out junk entries
        urls = {u for u in urls if u and not u.startswith(('data:', 'javascript:', 'blob:'))}
        if not urls:
            self.log("No images found - the page is probably rendered by JavaScript")
            wx.CallAfter(self.finish)
            return

        self.log(f"Found {len(urls)} images, downloading...")
        success = 0
        for i, raw in enumerate(sorted(urls)):
            url = urljoin(base_url, raw.split('?')[0])
            # Build a clean file name, falling back to a numbered one
            name = os.path.basename(urlparse(url).path) or f"img_{i+1:04d}.jpg"
            save_path = self.get_unique_filename(os.path.join(self.target_folder, name))
            try:
                img = self.session.get(url, timeout=20)
                img.raise_for_status()
                with open(save_path, 'wb') as f:
                    f.write(img.content)
                kb = len(img.content) // 1024
                wx.CallAfter(self.list_ctrl.Append, [os.path.basename(save_path), f"{kb}KB", "OK"])
                self.downloaded_files.append(save_path)
                success += 1
            except Exception:
                wx.CallAfter(self.list_ctrl.Append, [name, "0", "Failed"])
            wx.CallAfter(self.gauge.SetValue, int((i + 1) / len(urls) * 100))

        self.log(f"Done! {success} images downloaded")
        wx.CallAfter(self.finish)
    def finish(self):
        self.btn_start.Enable(True)
        self.gauge.SetValue(100)
        os.startfile(self.target_folder)
        if self.chk_pdf.IsChecked() and self.downloaded_files:
            pdf = os.path.join(self.target_folder, f"collection_{datetime.now():%Y%m%d_%H%M%S}.pdf")
            self.make_pdf(self.downloaded_files, pdf)
            wx.MessageBox(f"Download finished!\nPDF created:\n{pdf}", "Success!")
    def make_pdf(self, imgs, path):
        doc = fitz.open()
        for p in imgs:
            try:
                img = fitz.open(p)
                pdfbytes = img.convert_to_pdf()
                img.close()
                pdfimg = fitz.open("pdf", pdfbytes)
                page = doc.new_page(width=595, height=842)  # A4 page size in points
                page.show_pdf_page(page.rect, pdfimg, 0)
            except Exception:
                pass
        doc.save(path)
        doc.close()
    # The original article omits the remaining handlers (choose folder, start, logging, right-click move);
    # an illustrative sketch of them follows.
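    # --- Minimal sketch, not the author's original code: assumed implementations of the
    # --- handlers referenced in init_ui, added so the listing runs end to end.
    def log(self, msg):
        # Thread-safe status update
        wx.CallAfter(self.status.SetLabel, msg)

    def on_choose_folder(self, event):
        dlg = wx.DirDialog(self, "Choose a folder to save the images into")
        if dlg.ShowModal() == wx.ID_OK:
            self.target_folder = dlg.GetPath()
            self.log(f"Save folder: {self.target_folder}")
        dlg.Destroy()

    def on_start(self, event):
        url = self.url_text.GetValue().strip()
        if not url or not self.target_folder:
            wx.MessageBox("Please paste a URL and choose a save folder first", "Hint")
            return
        self.btn_start.Enable(False)
        self.downloaded_files = []
        self.list_ctrl.DeleteAllItems()
        self.gauge.SetValue(0)
        # Download in a background thread so the UI stays responsive
        threading.Thread(target=self.download_all_images, args=(url,), daemon=True).start()

    def on_right_click(self, event):
        self.PopupMenu(self.menu)

    def on_move(self, event):
        # Move the images selected in the list to another folder
        import shutil
        dlg = wx.DirDialog(self, "Move selected images to...")
        if dlg.ShowModal() == wx.ID_OK:
            dest = dlg.GetPath()
            idx = self.list_ctrl.GetFirstSelected()
            while idx != -1:
                name = self.list_ctrl.GetItemText(idx)
                src = os.path.join(self.target_folder, name)
                if os.path.exists(src):
                    shutil.move(src, os.path.join(dest, name))
                idx = self.list_ctrl.GetNextSelected(idx)
        dlg.Destroy()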

if __name__ == '__main__':
    app = wx.App(False)
    frame = UniversalImageDownloader()
    frame.Show()  # without Show() the window never appears
    app.MainLoop()
Install the dependencies (run this once)
pip install wxpython requests beautifulsoup4 pymupdf
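One portability note: os.startfile only exists on Windows, so the "open folder" calls in the code above will fail on macOS or Linux. If you want to run the tool elsewhere, a small helper along these lines (a sketch, not part of the original tool) can stand in for those calls:

import os
import platform
import subprocess

def open_folder(path):
    # Cross-platform stand-in for os.startfile(path); illustrative sketch only
    system = platform.system()
    if system == "Windows":
        os.startfile(path)
    elif system == "Darwin":  # macOS
        subprocess.Popen(["open", path])
    else:  # Linux and other desktops
        subprocess.Popen(["xdg-open", path])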
Run result (screenshot omitted; run the tool to see it in action)

