java大文件上传处理方法实例代码
作者:jsonformat
前言
文件处理是业务中最常见的操作了,但对于个人来说,百兆大文件的处理还是挺少的,本文记录下大文件处理的主流处理方案,顺便更新下老旧API(File)。
一、前后端大文件上传
1.方案描述
当前主流的大文件上传方案以分片上传 + 断点续传 + 秒传为核心架构,以实现高效稳定上传。
- 秒传:根据文件的唯一标识如hash值校验服务端是否存在此文件,若存在则是秒传
- 分片上传:将大文件分割为多个小文件分开上传
- 断点续传:上传中断后重新上传时,只需上传尚未成功的分片
前端:秒传、文件分块、文件相关信息上传、获取文件已上传的分片、文件分片上传、文件分片合并。
后端:提供相应的功能接口,秒传校验、初始化文件信息、查询已上传的文件分片、分片上传、合并分片。增加批处理对规定时间范围内未完成上传的大文件进行邮件告警通知。
存储:创建临时目录存储各分片文件资源,数据表记录分片上传记录和文件基本信息,最终合并各分片写入指定目录文件中,更新数据表中的文件记录,删除分片信息。
2.后端代码
技术选型
java8+springboot2.0+mybatis
yaml配置
# Server port
server:
  port: 8080

# File upload configuration
file:
  upload:
    root-path: ./upload-files/   # Root directory for the final merged files

spring:
  servlet:
    multipart:
      enabled: true
      max-file-size: 20MB        # Max size of a single chunk
      max-request-size: 100MB    # Max size of a single request
  # Datasource configuration (MySQL).
  # FIX: the original comment claimed "H2 in-memory, no install needed",
  # which contradicted the MySQL URL below.
  datasource:
    url: jdbc:mysql://localhost:3306/my-test?characterEncoding=UTF-8&useSSL=false
    username: root
    password: 123456
    # NOTE(review): com.mysql.jdbc.Driver is the legacy Connector/J 5 class;
    # on Connector/J 8.x use com.mysql.cj.jdbc.Driver — confirm the driver version in use.
    driverClassName: com.mysql.jdbc.Driver

mybatis:
  mapperLocations: classpath:/mapper/*.xml
  typeAliasesPackage: org.example.entity
  configuration:
    mapUnderscoreToCamelCase: true

logging:
  level:
    org.example.dao: DEBUG
mysql table
-- Upload record table: one row per file upload task.
CREATE TABLE `f_file_record` (
  `id`          bigint(20)   NOT NULL AUTO_INCREMENT COMMENT '自增ID',
  `file_id`     varchar(64)  NOT NULL COMMENT '文件唯一标识(UUID)',
  `file_name`   varchar(255) NOT NULL COMMENT '原始文件名',
  `file_size`   bigint(20)   NOT NULL COMMENT '文件总大小(字节)',
  `file_hash`   varchar(64)  NOT NULL COMMENT '文件MD5哈希值(用于秒传)',
  `file_path`   varchar(512) DEFAULT NULL COMMENT '最终文件存储路径',
  `chunk_total` int(11)      NOT NULL COMMENT '总分片数',
  `chunk_size`  int(11)      NOT NULL COMMENT '单片大小(字节)',
  `status`      tinyint(4)   NOT NULL COMMENT '状态:0-上传中,1-已完成,2-失败',
  `create_time` datetime     NOT NULL COMMENT '创建时间',
  `update_time` datetime     NOT NULL COMMENT '更新时间',
  PRIMARY KEY (`id`),
  UNIQUE KEY `uk_file_id` (`file_id`) COMMENT '文件ID唯一索引',
  KEY `idx_file_hash` (`file_hash`) COMMENT '哈希索引(秒传查询用)'
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COMMENT='文件上传记录表';

-- Chunk record table: one row per uploaded chunk of a file.
CREATE TABLE `f_chunk_record` (
  `id`          bigint(20)   NOT NULL AUTO_INCREMENT COMMENT '自增ID',
  `file_id`     varchar(64)  NOT NULL COMMENT '关联文件ID',
  `chunk_index` int(11)      NOT NULL COMMENT '分片索引(从0开始)',
  `chunk_path`  varchar(512) NOT NULL COMMENT '分片临时存储路径',
  `chunk_size`  bigint(20)   NOT NULL COMMENT '分片实际大小(字节)',
  `create_time` datetime     NOT NULL COMMENT '创建时间',
  PRIMARY KEY (`id`),
  UNIQUE KEY `uk_file_chunk` (`file_id`,`chunk_index`) COMMENT '文件+分片索引唯一(避免重复上传)',
  KEY `idx_file_id` (`file_id`) COMMENT '文件ID索引(查询分片列表用)'
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='文件分片记录表';
对应实体
@Data public class FileRecord { private String fileId; // 文件唯一标识(建议用UUID) private String fileName; // 原始文件名 private String fileHash; // 文件MD5哈希(用于秒传) private Long fileSize; // 文件总大小(字节) private Integer chunkTotal; // 总分片数 private Integer chunkSize; // 单片大小(字节) private String filePath; // 最终存储路径 private Integer status; // 状态:0-上传中 1-已完成 2-失败 private Date createTime; private Date updateTime; } @Data public class ChunkRecord { private Long id; private String fileId; // 关联文件ID private Integer chunkIndex; // 分片索引(从0开始) private String chunkPath; // 分片临时存储路径 private Long chunkSize; // 分片实际大小 private Date createTime; }
controller
/**
 * REST endpoints for chunked large-file upload: instant-upload check,
 * upload initialisation, uploaded-chunk query, chunk upload and merge.
 */
@RestController
@RequestMapping("/upload")
public class FileUploadController {

    @Autowired
    private FileUploadService uploadService;

    /**
     * Instant-upload check: the file counts as already present only when a
     * record with the same hash exists AND its size matches.
     */
    @PostMapping("/check")
    public ResponseEntity<Map<String, Object>> checkFile(@RequestParam String fileHash,
                                                         @RequestParam Long fileSize) {
        Map<String, Object> body = new HashMap<>();
        body.put("success", true);
        FileRecord record = uploadService.checkFile(fileHash);
        boolean hit = record != null && Objects.equals(fileSize, record.getFileSize());
        body.put("exists", hit);
        if (hit) {
            body.put("fileId", record.getFileId());
            body.put("filePath", record.getFilePath());
        }
        return ResponseEntity.ok(body);
    }

    /** Creates the upload record and hands the generated fileId back to the client. */
    @PostMapping("/init")
    public ResponseEntity<Map<String, Object>> initUpload(@RequestParam String fileName,
                                                          @RequestParam Long fileSize,
                                                          @RequestParam String fileHash,
                                                          @RequestParam Integer chunkTotal,
                                                          @RequestParam Integer chunkSize) {
        Map<String, Object> body = new HashMap<>();
        try {
            String fileId = uploadService.initUpload(fileName, fileSize, fileHash, chunkTotal, chunkSize);
            body.put("success", true);
            body.put("fileId", fileId);
            return ResponseEntity.ok(body);
        } catch (Exception e) {
            return failure(body, e.getMessage());
        }
    }

    /** Lists the chunk indexes already stored for a file (resume support). */
    @GetMapping("/chunks/{fileId}")
    public ResponseEntity<Map<String, Object>> getUploadedChunks(@PathVariable String fileId) {
        Map<String, Object> body = new HashMap<>();
        body.put("success", true);
        body.put("uploadedChunks", uploadService.getUploadedChunks(fileId));
        return ResponseEntity.ok(body);
    }

    /** Receives and stores one chunk of the file. */
    @PostMapping("/chunk")
    public ResponseEntity<Map<String, Object>> uploadChunk(@RequestParam String fileId,
                                                           @RequestParam Integer chunkIndex,
                                                           @RequestParam MultipartFile chunk) {
        Map<String, Object> body = new HashMap<>();
        try {
            body.put("success", uploadService.uploadChunk(fileId, chunkIndex, chunk));
            return ResponseEntity.ok(body);
        } catch (Exception e) {
            return failure(body, e.getMessage());
        }
    }

    /** Merges all stored chunks into the final file. */
    @PostMapping("/merge")
    public ResponseEntity<Map<String, Object>> mergeChunks(@RequestParam String fileId) {
        Map<String, Object> body = new HashMap<>();
        try {
            FileRecord merged = uploadService.mergeChunks(fileId);
            if (merged == null) {
                return failure(body, "合并失败");
            }
            body.put("success", true);
            body.put("fileId", merged.getFileId());
            body.put("filePath", merged.getFilePath());
            return ResponseEntity.ok(body);
        } catch (Exception e) {
            return failure(body, e.getMessage());
        }
    }

    /** Fills the standard failure payload and wraps it in a 500 response. */
    private ResponseEntity<Map<String, Object>> failure(Map<String, Object> body, String msg) {
        body.put("success", false);
        body.put("msg", msg);
        return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(body);
    }
}
service
public interface FileUploadService { // 检查文件是否已存在(秒传) FileRecord checkFile(String fileHash); // 初始化上传任务 String initUpload(String fileName, Long fileSize, String fileHash, Integer chunkTotal, Integer chunkSize); // 获取已上传的分片索引 List<Integer> getUploadedChunks(String fileId); // 上传分片 boolean uploadChunk(String fileId, Integer chunkIndex, MultipartFile chunkFile); // 合并分片 FileRecord mergeChunks(String fileId); } @Service public class FileUploadServiceImpl implements FileUploadService { // 最终文件存储根路径(配置在application.properties) @Value("${file.upload.root-path}") private String rootPath; // 临时分片存储路径 private final Path tempChunkPath; @Autowired private FileRecordMapper fileRepo; @Autowired private ChunkRecordMapper chunkRepo; // 初始化临时目录(使用NIO) public FileUploadServiceImpl() throws IOException { // 临时目录路径:系统临时目录 + upload-chunks tempChunkPath = Paths.get(System.getProperty("java.io.tmpdir"), "upload-chunks"); // 若目录不存在则创建(支持多级目录) Files.createDirectories(tempChunkPath); } /** * 上传分片:使用NIO的Files.copy替代传统File操作 */ @Override @Transactional public boolean uploadChunk(String fileId, Integer chunkIndex, MultipartFile chunkFile) { try { // 检查分片是否已存在 ChunkRecord existing = chunkRepo.findByFileIdAndChunkIndex(fileId, chunkIndex); if (existing != null) { return true; } // 构建分片存储路径(NIO Path) Path chunkDir = Paths.get(tempChunkPath.toString(), fileId); Files.createDirectories(chunkDir); // 创建目录(NIO方法) Path chunkPath = Paths.get(chunkDir.toString(), chunkIndex.toString()); // 使用NIO复制文件(替代transferTo) try (InputStream in = chunkFile.getInputStream()) { Files.copy(in, chunkPath, StandardCopyOption.REPLACE_EXISTING); } // 记录分片信息 ChunkRecord chunk = new ChunkRecord(); chunk.setFileId(fileId); chunk.setChunkIndex(chunkIndex); chunk.setChunkPath(chunkPath.toString()); // 存储路径字符串 chunk.setChunkSize(Files.size(chunkPath)); // 使用NIO获取文件大小 chunk.setCreateTime(new Date()); chunkRepo.save(chunk); return true; } catch (Exception e) { e.printStackTrace(); return false; } } /** * 合并分片:使用NIO的Path处理文件路径 */ 
@Override @Transactional public FileRecord mergeChunks(String fileId) { try { // 获取文件信息 FileRecord file = fileRepo.findById(fileId); if (file == null) { new RuntimeException("文件记录不存在"); } // 获取所有分片(按索引排序) List<ChunkRecord> chunks = chunkRepo.findByFileIdOrderByChunkIndexAsc(fileId); if (chunks.size() != file.getChunkTotal()) { throw new RuntimeException("分片不完整,无法合并"); } // 创建最终文件存储目录(按日期分目录,使用NIO) String dateDir = new Date().toString().substring(0, 10).replace(" ", "-"); Path saveDir = Paths.get(rootPath, dateDir); Files.createDirectories(saveDir); // NIO创建目录 // 生成最终文件名(UUID+原扩展名) String ext = file.getFileName().contains(".") ? file.getFileName().substring(file.getFileName().lastIndexOf(".")) : ""; String finalFileName = UUID.randomUUID().toString() + ext; Path finalPath = Paths.get(saveDir.toString(), finalFileName); // 合并分片(使用RandomAccessFile + NIO Path) try (RandomAccessFile raf = new RandomAccessFile(finalPath.toFile(), "rw")) { for (ChunkRecord chunk : chunks) { Path chunkPath = Paths.get(chunk.getChunkPath()); // NIO Path try (InputStream fis = Files.newInputStream(chunkPath)) { // NIO获取输入流 byte[] buffer = new byte[1024 * 1024]; // 1MB缓冲区 int len; while ((len = fis.read(buffer)) != -1) { raf.write(buffer, 0, len); } } } } // 更新文件记录 file.setFilePath(finalPath.toString()); file.setStatus(1); // 1-已完成 file.setUpdateTime(new Date()); fileRepo.update(file); // 清理临时分片(使用NIO删除) cleanTempChunks(fileId); return file; } catch (Exception e) { e.printStackTrace(); FileRecord f = fileRepo.findById(fileId); if (f != null) { f.setStatus(2); // 2-失败 fileRepo.update(f); } return null; } } /** * 清理临时分片:使用NIO的Files.walk递归删除 */ private void cleanTempChunks(String fileId) throws IOException { Path chunkDir = Paths.get(tempChunkPath.toString(), fileId); if (Files.exists(chunkDir)) { // 递归删除目录及内容(NIO方式) try (Stream<Path> stream = Files.walk(chunkDir)){ stream.sorted(Comparator.reverseOrder())// 逆序删除(先文件后目录) .forEach(path -> { try { Files.delete(path); } catch (IOException e) { 
e.printStackTrace(); } }); } } // 删除数据库分片记录 chunkRepo.deleteByFileId(fileId); } // 其他方法(checkFile/initUpload/getUploadedChunks)保持不变 @Override public FileRecord checkFile(String fileHash) { return fileRepo.findByFileHashAndStatus(fileHash, 1); } @Override @Transactional public String initUpload(String fileName, Long fileSize, String fileHash, Integer chunkTotal, Integer chunkSize) { FileRecord file = new FileRecord(); file.setFileId(UUID.randomUUID().toString().replace("-","")); file.setFileName(fileName); file.setFileSize(fileSize); file.setFileHash(fileHash); file.setChunkTotal(chunkTotal); file.setChunkSize(chunkSize); file.setStatus(0); file.setCreateTime(new Date()); file.setUpdateTime(new Date()); fileRepo.save(file); return file.getFileId(); } @Override public List<Integer> getUploadedChunks(String fileId) { List<ChunkRecord> chunks = chunkRepo.findByFileIdOrderByChunkIndexAsc(fileId); return chunks.stream() .map(ChunkRecord::getChunkIndex) .collect(Collectors.toList()); } }
mapper
//文件mapper public interface FileRecordMapper { // 通过文件哈希查询(用于秒传) FileRecord findByFileHashAndStatus(String fileHash, Integer status); FileRecord findById(String fileId); void update(FileRecord file); void save(FileRecord file); } //分片mapper public interface ChunkRecordMapper { // 查询文件的所有分片(按索引排序) List<ChunkRecord> findByFileIdOrderByChunkIndexAsc(String fileId); // 查询指定分片 ChunkRecord findByFileIdAndChunkIndex(String fileId, Integer chunkIndex); // 删除文件的所有分片 void deleteByFileId(String fileId); void save(ChunkRecord chunk); }
mapper-xml
------------------文件mapper------------------
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="org.example.dao.FileRecordMapper">
    <select id="findByFileHashAndStatus" resultType="org.example.entity.FileRecord">
        select * from f_file_record where file_hash = #{fileHash} and status = #{status}
    </select>
    <select id="findById" resultType="org.example.entity.FileRecord">
        select * from f_file_record where file_id = #{fileId}
    </select>
    <!-- FIX: also persist update_time; the service sets updateTime before
         calling update, but the original statement silently dropped it. -->
    <update id="update">
        update f_file_record
        set status = #{status}, file_path = #{filePath}, update_time = #{updateTime}
        where file_id = #{fileId}
    </update>
    <insert id="save">
        insert into f_file_record
            (file_id, file_hash, file_name, file_size, chunk_total, chunk_size, status, create_time, update_time)
        values
            (#{fileId}, #{fileHash}, #{fileName}, #{fileSize}, #{chunkTotal}, #{chunkSize}, #{status}, #{createTime}, #{updateTime})
    </insert>
</mapper>
------------------分片mapper------------------
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="org.example.dao.ChunkRecordMapper">
    <select id="findByFileIdOrderByChunkIndexAsc" resultType="org.example.entity.ChunkRecord">
        select * from f_chunk_record where file_id = #{fileId} order by chunk_index asc
    </select>
    <select id="findByFileIdAndChunkIndex" resultType="org.example.entity.ChunkRecord">
        select * from f_chunk_record where file_id = #{fileId} and chunk_index = #{chunkIndex}
    </select>
    <delete id="deleteByFileId">
        delete from f_chunk_record where file_id = #{fileId}
    </delete>
    <insert id="save">
        insert into f_chunk_record (file_id, chunk_index, chunk_path, chunk_size, create_time)
        values (#{fileId}, #{chunkIndex}, #{chunkPath}, #{chunkSize}, #{createTime})
    </insert>
</mapper>
3.验证
这里使用ApiPost进行模拟测试
预处理文件
- 计算文件的hash值,用以实现秒传(Linux/Mac:md5sum “文件路径”)
- 计算分片:总片数=文件大小/每片大小(向上取整)
- 文件分片(Linux/Mac:split -b 10m test.zip chunk_ 将test.zip切割为10MB的分片,命名为chunk_aa、chunk_ab…)
接口调用
秒传校验
不存在
存在,秒传成功
文件初始化信息
需要对文件进行预处理
查询已上传的分片
分片上传
chunk表
临时目录存放的分片文件
合并分片
状态已改变
分片记录删除
临时目录中的分片文件也删除了
二、纯后端大文件处理
1.方案描述
后端处理百兆级大文件,可以使用java nio包中的FileChannel和ByteBuffer,借助零拷贝技术传输数据,减少内核态与用户态之间的数据拷贝次数,从而节省CPU和内存资源。
2.后端代码
零拷贝
利用FileChannel.transferTo实现内核级数据传输,减少用户空间拷贝次数。
//服务端
/**
 * Minimal single-connection server: receives one file over TCP and writes it
 * to disk with FileChannel.transferFrom, so the data moves kernel-side
 * without an intermediate user-space buffer.
 *
 * NOTE(review): accepts exactly one client and then exits; there is no
 * length framing or authentication — the transfer ends when the client
 * half-closes its side of the connection.
 */
public class ZeroCopyServer {
    public static void main(String[] args) throws Exception {
        Path destination = Paths.get("./upload-files/move/testdemo.zip");
        int port = 8080;
        // Make sure the target file's parent directory exists before opening it.
        Path parentDir = destination.getParent();
        if (!Files.exists(parentDir)) {
            Files.createDirectories(parentDir);
        }
        try (ServerSocketChannel server = ServerSocketChannel.open()) {
            server.bind(new InetSocketAddress(port));
            System.out.println("Server listening on port " + port);
            // Both the socket and the output channel close automatically here.
            try (SocketChannel client = server.accept();
                 FileChannel outChannel = FileChannel.open(
                         destination,
                         StandardOpenOption.CREATE,
                         StandardOpenOption.WRITE,
                         StandardOpenOption.TRUNCATE_EXISTING)) {
                long totalBytes = 0;
                long bytesTransferred;
                // Keep pulling bytes from the socket until the peer closes:
                // on a blocking channel transferFrom returns 0 at end-of-stream.
                // totalBytes doubles as the file write position.
                do {
                    bytesTransferred = outChannel.transferFrom(client, totalBytes, Long.MAX_VALUE);
                    if (bytesTransferred > 0) {
                        totalBytes += bytesTransferred;
                        System.out.printf("Received %.2f MB%n", bytesTransferred / (1024.0 * 1024.0));
                    }
                } while (bytesTransferred > 0);
                System.out.println("File transfer completed. Total size: " + totalBytes + " bytes");
            }
        }
    }
}
客户端
public static void main(String[] args) throws Exception { Path source = Paths.get("/Users/xxxxx/Downloads/books/testdemo.zip"); long chunkSize = 50 * 1024 * 1024;//分片大小 try (SocketChannel socket = SocketChannel.open(); FileChannel inChannel = FileChannel.open(source, StandardOpenOption.READ)) { // 设置连接 socket.socket().setSoTimeout(30000); socket.connect(new InetSocketAddress(InetAddress.getLocalHost(), 8080)); long fileSize = inChannel.size(); long position = 0; System.out.println("Starting file transfer. Total size: " + fileSize + " bytes"); while (position < fileSize) { long remaining = fileSize - position; long transferSize = Math.min(chunkSize, remaining); long transferred = inChannel.transferTo(position, transferSize, socket); if (transferred > 0) { position += transferred; System.out.printf("Sent %.2f MB (%.1f%%)%n", transferred / (1024.0 * 1024.0), (position * 100.0) / fileSize); } } // 优雅关闭输出(通知服务端传输结束) socket.shutdownOutput(); System.out.println("File upload completed"); } }
3.验证
- 启动服务端,执行客户端发送请求(注意大文件分片)
- 这里是简单的本地处理实现,实际业务中常用的影像资料上传或者日志文件处理可以参考使用(10M以上的)。
Starting file transfer. Total size: 455759002 bytes Sent 50.00 MB (11.5%) Sent 50.00 MB (23.0%) Sent 50.00 MB (34.5%) Sent 50.00 MB (46.0%) Sent 50.00 MB (57.5%) Sent 50.00 MB (69.0%) Sent 50.00 MB (80.5%) Sent 50.00 MB (92.0%) Sent 34.65 MB (100.0%) File upload completed
三、java文件API更替
JDK 1.7 引入的 NIO.2(java.nio.file 包)提供的文件处理类,相对于 File 来说更安全便捷
原来用File对于文件或目录的操作(增、删、读、写、校验),现用Path和Files进行替换
新旧API对比
/**
 * Side-by-side examples of classic java.io operations and their
 * java.nio.file (Path/Files) replacements: read, write, copy, create/list
 * directories and delete.
 */
public class NioFileExamples {

    public static void main(String[] args) {
        String filePath = "./file-api/test01/test01.txt";
        String copyPath = "./file-api/test02/test01copy.txt";
        String dirPath = "./file-api-001/test01";
        // 1. Reading files
        readFileExample(filePath);
        // 2. Writing files
        writeFileExample(filePath, "Hello, NIO! This is a test.");
        // 3. Copying files
        copyFileExample(filePath, copyPath);
        // 4. Creating directories
        createDirectoryExample(dirPath);
        // 5. Listing directory contents
        listDirectoryExample("./file-api-001");
        // 6. Deleting files and directories
        deleteFileExample(copyPath);
        deleteFileExample(filePath);
        deleteDirectoryExample(dirPath);
    }

    /** Reads a text file: classic buffered IO vs NIO readAllLines / lines. */
    private static void readFileExample(String filePath) {
        System.out.println("\n--- 文件读取示例 ---");
        // Classic IO
        try (BufferedReader br = new BufferedReader(
                new InputStreamReader(new FileInputStream(filePath), StandardCharsets.UTF_8))) {
            String line;
            System.out.println("传统IO读取:");
            while ((line = br.readLine()) != null) {
                System.out.println(line);
            }
        } catch (IOException e) {
            System.out.println("传统IO读取失败: " + e.getMessage());
        }
        // NIO
        try {
            // Read all lines at once
            List<String> lines = Files.readAllLines(Paths.get(filePath), StandardCharsets.UTF_8);
            System.out.println("\nNIO读取所有行:");
            lines.forEach(System.out::println);
            // Stream-based read.
            // FIX: Files.lines returns a Stream backed by an open file handle;
            // the original never closed it, leaking a file descriptor.
            System.out.println("\nNIO流式读取:");
            try (java.util.stream.Stream<String> stream =
                         Files.lines(Paths.get(filePath), StandardCharsets.UTF_8)) {
                stream.forEach(System.out::println);
            }
        } catch (IOException e) {
            System.out.println("NIO读取失败: " + e.getMessage());
        }
    }

    /** Writes text: classic buffered IO vs NIO Files.write (bytes and lines). */
    private static void writeFileExample(String filePath, String content) {
        System.out.println("\n--- 文件写入示例 ---");
        // Classic IO
        try (BufferedWriter bw = new BufferedWriter(
                new OutputStreamWriter(new FileOutputStream(filePath), StandardCharsets.UTF_8))) {
            bw.write(content);
            System.out.println("传统IO写入成功");
        } catch (IOException e) {
            System.out.println("传统IO写入失败: " + e.getMessage());
        }
        // NIO — write a String as bytes
        try {
            Files.write(Paths.get(filePath), content.getBytes(StandardCharsets.UTF_8));
            System.out.println("NIO写入字符串成功");
        } catch (IOException e) {
            System.out.println("NIO写入字符串失败: " + e.getMessage());
        }
        // NIO — write multiple lines
        List<String> lines = Arrays.asList("第一行", "第二行", "第三行");
        try {
            Files.write(Paths.get(filePath), lines, StandardCharsets.UTF_8);
            System.out.println("NIO写入多行成功");
        } catch (IOException e) {
            System.out.println("NIO写入多行失败: " + e.getMessage());
        }
    }

    /** Copies a file: manual buffer loop vs one-call Files.copy. */
    private static void copyFileExample(String sourcePath, String targetPath) {
        System.out.println("\n--- 文件复制示例 ---");
        // Classic IO
        try (InputStream is = new FileInputStream(sourcePath);
             OutputStream os = new FileOutputStream(targetPath)) {
            byte[] buffer = new byte[1024];
            int length;
            while ((length = is.read(buffer)) > 0) {
                os.write(buffer, 0, length);
            }
            System.out.println("传统IO复制成功");
        } catch (IOException e) {
            System.out.println("传统IO复制失败: " + e.getMessage());
        }
        // NIO
        try {
            Files.copy(Paths.get(sourcePath), Paths.get(targetPath),
                    StandardCopyOption.REPLACE_EXISTING);
            System.out.println("NIO复制成功");
        } catch (IOException e) {
            System.out.println("NIO复制失败: " + e.getMessage());
        }
    }

    /** Creates nested directories: File.mkdirs vs Files.createDirectories. */
    private static void createDirectoryExample(String dirPath) {
        System.out.println("\n--- 目录创建示例 ---");
        // Classic IO
        File dir = new File(dirPath);
        if (dir.mkdirs()) {
            System.out.println("传统IO创建目录成功");
        } else {
            System.out.println("传统IO创建目录失败或目录已存在");
        }
        // NIO (no-op when the directory already exists)
        try {
            Files.createDirectories(Paths.get(dirPath));
            System.out.println("NIO创建目录成功");
        } catch (IOException e) {
            System.out.println("NIO创建目录失败: " + e.getMessage());
        }
    }

    /** Lists a directory: File.list vs Files.newDirectoryStream. */
    private static void listDirectoryExample(String dirPath) {
        System.out.println("\n--- 列出目录内容示例 ---");
        // Classic IO
        File dir = new File(dirPath);
        String[] files = dir.list();
        if (files != null) {
            System.out.println("传统IO列出目录内容:");
            for (String file : files) {
                System.out.println(file);
            }
        }
        // NIO — DirectoryStream is AutoCloseable, so use try-with-resources
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(Paths.get(dirPath))) {
            System.out.println("\nNIO列出目录内容:");
            for (Path path : stream) {
                System.out.println(path.getFileName());
            }
        } catch (IOException e) {
            System.out.println("NIO列出目录内容失败: " + e.getMessage());
        }
    }

    /** Deletes a single file: File.delete vs Files.deleteIfExists. */
    private static void deleteFileExample(String filePath) {
        System.out.println("\n--- 文件删除示例 ---");
        // Classic IO
        File file = new File(filePath);
        if (file.delete()) {
            System.out.println("传统IO删除文件成功");
        } else {
            System.out.println("传统IO删除文件失败或文件不存在");
        }
        // NIO
        try {
            Files.deleteIfExists(Paths.get(filePath));
            System.out.println("NIO删除文件成功");
        } catch (IOException e) {
            System.out.println("NIO删除文件失败: " + e.getMessage());
        }
    }

    /**
     * Recursively deletes a directory with NIO (the classic API would need a
     * hand-written recursion).
     */
    private static void deleteDirectoryExample(String dirPath) {
        System.out.println("\n--- 目录删除示例 ---");
        // FIX: Files.walk returns a Stream holding open directory handles;
        // the original never closed it — wrap in try-with-resources.
        try (java.util.stream.Stream<Path> walk = Files.walk(Paths.get(dirPath))) {
            walk.sorted(Comparator.reverseOrder()) // reverse order: files before their directories
                    .forEach(path -> {
                        try {
                            Files.delete(path);
                        } catch (IOException e) {
                            System.out.println("删除失败: " + path + " - " + e.getMessage());
                        }
                    });
            System.out.println("NIO删除目录成功");
        } catch (IOException e) {
            System.out.println("NIO删除目录失败: " + e.getMessage());
        }
    }
}
总结
- 大文件上传若是用户行为,跟前端配合使用分块上传处理
- 大文件上传若是后端行为,可以使用FileChannel等零拷贝技术或者直接内存映射技术
- 推荐使用java nio包中Path、Files来替代File对象进行文件操作和流操作,安全又便捷。
到此这篇关于java大文件上传处理方法的文章就介绍到这了,更多相关java大文件上传内容请搜索脚本之家以前的文章或继续浏览下面的相关文章希望大家以后多多支持脚本之家!