Part 37: Chunked Uploads of Large Files with the vue-simple-uploader Component, Plus Instant Upload (秒传)

亮子 · 2022-12-14 01:30:14

1. Introduction to the vue-simple-uploader Upload Component

vue-simple-uploader is a Vue upload plugin built on top of simple-uploader.js. Its advantages include, but are not limited to:

File, multi-file, and folder uploads; drag-and-drop of both files and folders
Pause and resume
Error handling
"Instant upload" (秒传): the server checks whether the file already exists (by its MD5) and, if so, skips the upload entirely
Chunked uploads
Progress display, estimated time remaining, automatic retry on error, and manual re-upload

# Source code:
https://github.com/simple-uploader/vue-uploader
# Example project:
https://gitee.com/luckytuan/fast-loader
  • Installation
npm install vue-simple-uploader --save

2. Introduction to the spark-md5 Library

1) Installation

npm install --save spark-md5

2) Usage

  • Regular files

One option is SparkMD5.hashBinary(), which is passed the entire file's binary content in one call and returns the file's MD5 directly, as sketched below.
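A minimal sketch of this one-shot approach (assuming `file` is a File object, e.g. taken from an `<input type="file">` element):

import SparkMD5 from 'spark-md5';

function md5WholeFile(file) {
    return new Promise((resolve, reject) => {
        const reader = new FileReader();
        // readAsBinaryString loads the whole file into memory at once,
        // which is why this only suits smaller files
        reader.onload = (e) => resolve(SparkMD5.hashBinary(e.target.result));
        reader.onerror = reject;
        reader.readAsBinaryString(file);
    });
}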

  • Larger files

For uploads we generally recommend this chunked approach: file sizes are unpredictable, and incremental hashing is more stable for large files while also exposing progress information during the computation.

Function definition

function md5(file, chunkSize) {
    // expects to be invoked as md5.call(this, ...) from a component that
    // provides this.md5Obj, this.container and this.onMD5Progress (see the call below)
    let _this = this
    return new Promise((resolve, reject) => {
      let blobSlice =
        File.prototype.slice ||
        File.prototype.mozSlice ||
        File.prototype.webkitSlice;
      let chunks = Math.ceil(file.size / chunkSize);
      let currentChunk = 0;
      let spark = new SparkMD5.ArrayBuffer(); // incremental hasher, fed one ArrayBuffer slice at a time
      let fileReader = new FileReader(); // reads each slice of the file
      fileReader.onload = function (e) {
        spark.append(e.target.result);
        currentChunk++;
        _this.md5Obj.percent = Math.floor((currentChunk / chunks) * 100);
        _this.container.file.MD5Progress = _this.md5Obj.percent
        if (_this.onMD5Progress(_this.container.file) === false) return;
        if (currentChunk < chunks) {
          loadNext();
        } else {
          _this.md5Obj.md5 = spark.end(); // finish the computation and return the hex digest
          resolve(_this.md5Obj);
        }
      };

      fileReader.onerror = function (e) {
        reject(e);
      };

      function loadNext() {
        let start = currentChunk * chunkSize;
        let end = start + chunkSize;
        (end > file.size) && (end = file.size);
        fileReader.readAsArrayBuffer(blobSlice.call(file, start, end));
      }
      loadNext();
    });
  }

Calling the function

      md5.call(this, file, this.chunkSize)
        .then(res => {
          // the file's MD5 is now available
          setDatas.call(this, 'params.md5', res.md5);
          if (this.autoUpload) {
            if (this.onStartUpload(this.container.file) === false) return;
            handleUpload.call(this);
          }
        })
        .catch(res => {
          // handle errors
          console.error(res);
        });

3. Uploading Files with vue-simple-uploader in Vue

1) Install the vue-simple-uploader component

npm install --save vue-simple-uploader

2) Modify main.js

Add the following code to main.js:

import uploader from 'vue-simple-uploader'
Vue.use(uploader)

3) Install spark-md5

npm install --save spark-md5

4) Vue front-end code

<template>
    <div>
        <div>
            <h3>Chunked Upload of Large Files</h3>
        </div>
        <div>
            <uploader ref="uploader" :auto-start="false" :options="options" :file-status-text="statusText" class="uploader-example"
                @file-complete="fileComplete" @complete="complete" @file-success="fileSuccess" @files-added="filesAdded">
                <uploader-unsupport></uploader-unsupport>
                <uploader-drop>
                    <p>Drop files here to upload</p>
                    <uploader-btn>Select files</uploader-btn>
                    <uploader-btn :attrs="attrs">Select images</uploader-btn>
                    <uploader-btn :directory="true">Select a folder</uploader-btn>
                </uploader-drop>
                <!-- <uploader-list></uploader-list> -->
                <uploader-files> </uploader-files>
            </uploader>

            <br />
            <el-button :disabled="disabled" @click="allStart()">Start all</el-button>
            <el-button style="margin-left: 4px" @click="allStop()">Pause all</el-button>
            <el-button style="margin-left: 4px" @click="allRemove()">Remove all</el-button>
        </div>
    </div>
</template>

<script>
    import { mergeChunks } from '@/api/api.js'
    import SparkMD5 from "spark-md5";
    export default {
        name: 'UploadFilePage',
        data() {
            return {
                // uploader options
                options: {
                    target: "http://localhost:8102/file/upload",
                    // enable server-side chunk verification (a GET test request per chunk)
                    testChunks: true,
                    chunkSize: 1024 * 1024 * 10,
                    // localize the remaining-time text into Chinese
                    parseTimeRemaining: function(timeRemaining, parsedTimeRemaining) {
                        return parsedTimeRemaining
                            .replace(/\syears?/, "年")
                            .replace(/\sdays?/, "天")
                            .replace(/\shours?/, "小时")
                            .replace(/\sminutes?/, "分钟")
                            .replace(/\sseconds?/, "秒");
                    },
                    // server-side chunk check: decide from the test-request
                    // response whether this chunk (or the whole file) can be skipped
                    checkChunkUploadedByResponse: (chunk, message) => {
                        const result = JSON.parse(message);
                        console.log('checkChunkUploadedByResponse', chunk, result);
                        if (result.data.skipUpload) {
                            this.skip = true;
                            return true;
                        }
                        return (result.data.uploaded || []).indexOf(chunk.offset + 1) >= 0;
                    },
                },
                statusText: {
                    success: "Uploaded",
                    error: "Upload failed",
                    uploading: "Uploading...",
                    paused: "Paused...",
                    waiting: "Waiting...",
                    cmd5: "Computing file MD5...",
                },
                attrs: {
                    accept: "image/*",
                },
                fileList: [],
                disabled: true,
                skip: false,
            }
        },
        methods: {
            fileComplete(rootFile) {
                // a root file (or folder) finished uploading
                console.log("fileComplete", rootFile);
            },
            complete() {
                // the whole upload queue finished
                console.log("complete");
            },
            filesAdded(files, fileList, event) {
                console.log(files, fileList, event);
                // hash each added file before uploading (multiple files may be
                // selected at once, hence the loop)
                files.forEach((e) => {
                    this.fileList.push(e);
                    this.computeMD5(e);
                });
                this.disabled = false; // enable the "Start all" button
            },
            // compute the file's MD5 chunk by chunk
            computeMD5(file) {
                let fileReader = new FileReader();
                let time = new Date().getTime();
                let blobSlice =
                    File.prototype.slice ||
                    File.prototype.mozSlice ||
                    File.prototype.webkitSlice;
                let currentChunk = 0;
                const chunkSize = 1024 * 1024 * 10;
                let chunks = Math.ceil(file.size / chunkSize);
                let spark = new SparkMD5.ArrayBuffer();
                // mark the file as "computing MD5" and hold the upload
                file.cmd5 = true;
                file.pause();
                fileReader.onload = (e) => {
                    spark.append(e.target.result);
                    currentChunk++;
                    if (currentChunk < chunks) {
                        // report hashing progress
                        console.log(`Chunk ${currentChunk}/${chunks} hashed, reading chunk ${currentChunk + 1}`);
                        loadNext();
                    } else {
                        let md5 = spark.end();
                        console.log(
                            `MD5 computed for ${file.name}\nMD5: ${md5}\nchunks: ${chunks} size: ${file.size} took: ${new Date().getTime() - time} ms`
                        );
                        spark.destroy(); // free the buffer
                        file.uniqueIdentifier = md5; // use the MD5 as the file's unique identifier
                        file.cmd5 = false; // leave the "computing MD5" state
                        file.resume(); // start uploading
                    }
                };
                fileReader.onerror = () => {
                    console.error(`Failed to read file ${file.name}; please check the file`);
                    file.cancel();
                };

                function loadNext() {
                    let start = currentChunk * chunkSize;
                    let end =
                        start + chunkSize >= file.size ? file.size : start + chunkSize;
                    fileReader.readAsArrayBuffer(blobSlice.call(file.file, start, end));
                }
                loadNext();
            },
            // all chunks of a file uploaded successfully: ask the server to merge them
            fileSuccess(rootFile, file, response, chunk) {
                const result = JSON.parse(response);
                console.log('fileSuccess', result, 'skip:', this.skip);

                if (result.success && !this.skip) {
                    let param = {
                        identifier: file.uniqueIdentifier,
                        filename: file.name,
                        totalChunks: chunk.offset + 1,
                    }
                    mergeChunks(param)
                        .then((res) => {
                            console.log('mergeChunks', res);
                            if (res.code === 200) {
                                console.log("Upload succeeded", res);
                            } else {
                                console.log(res);
                            }
                        })
                } else {
                    console.log("Upload succeeded, no merge needed");
                }
                if (this.skip) {
                    this.skip = false;
                }
            },
            // the buttons above drive the underlying simple-uploader.js
            // instance, exposed by vue-simple-uploader as this.$refs.uploader.uploader
            allStart() {
                this.$refs.uploader.uploader.resume();
            },
            allStop() {
                this.$refs.uploader.uploader.pause();
            },
            allRemove() {
                this.$refs.uploader.uploader.cancel();
            },
        }
    }
</script>

<style>
    .uploader-example {
        width: 100%;
        padding: 15px;
        margin: 0px auto 0;
        font-size: 12px;
        box-shadow: 0 0 10px rgba(0, 0, 0, 0.4);
    }

    .uploader-example .uploader-btn {
        margin-right: 4px;
    }

    .uploader-example .uploader-list {
        max-height: 440px;
        overflow-x: hidden;
        overflow-y: auto;
    }
</style>
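For reference, checkChunkUploadedByResponse above assumes the GET test request answers with JSON of roughly this shape. skipUpload matches the back end shown below; the uploaded array of already-received chunk numbers is consulted by the front end but not yet produced by this back end:

const exampleCheckResponse = {
    code: 200,
    data: {
        skipUpload: false,   // true → the whole file already exists on the server (instant upload)
        uploaded: [1, 2, 3], // chunk numbers the server already holds (optional)
    },
};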
  • API definition
// ask the server to merge the uploaded chunks
export const mergeChunks = (data) => {
    return service({
        url: 'http://localhost:8102/file/merge',
        method: 'post',
        data
    })
};
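The `service` helper used above is not shown in the original post. A minimal stand-in, assuming plain axios, could look like this; the response interceptor unwraps the body so that callers can read res.code directly, as fileSuccess does:

import axios from 'axios';

// hypothetical request helper: a preconfigured axios instance
const service = axios.create({
    timeout: 30000,
});

// unwrap the response body so callers receive { code, data, ... } directly
service.interceptors.response.use((response) => response.data);

export default service;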

5) Java back-end code

package com.shenma2005.upload.controller;

import com.shenma2005.comms.utils.ResultResponse;
import com.shenma2005.upload.mapper.TbUploadLocalFileMapper;
import com.shenma2005.upload.pojo.TbUploadLocalFile;
import com.shenma2005.upload.vo.MergeFileVueVo;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.util.MultiValueMap;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import org.springframework.web.multipart.MultipartHttpServletRequest;

import javax.servlet.http.HttpServletRequest;
import java.io.*;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.UUID;

@RestController
@Slf4j
@CrossOrigin
@RequestMapping(value = "/file")
public class UploadController {


    @Autowired
    RedisTemplate<String, Object> redisTemplate;

    @Autowired
    TbUploadLocalFileMapper tbUploadLocalFileMapper;

    /**
     * Handles the GET test request sent when testChunks is enabled:
     * the instant-upload ("秒传") check
     * @param request
     * @return
     */
    @GetMapping(value = "/upload")
    public ResultResponse uploadGetChunk(HttpServletRequest request) {
        //--1 read the request parameters
        String chunkNumber = request.getParameter("chunkNumber");
        String chunkSize = request.getParameter("chunkSize");
        String currentChunkSize = request.getParameter("currentChunkSize");
        String totalSize = request.getParameter("totalSize");
        String identifier = request.getParameter("identifier");
        String filename = request.getParameter("filename");
        String relativePath = request.getParameter("relativePath");
        String totalChunks = request.getParameter("totalChunks");

        //--2 instant-upload check
        // TODO
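        // A possible implementation (sketch): if a finished file with this MD5
        // identifier is already recorded, tell the client to skip the upload.
        // Note: selectByMd5 is hypothetical; the mapper shown in this post
        // does not define it.
        //
        //   TbUploadLocalFile existing = tbUploadLocalFileMapper.selectByMd5(identifier);
        //   if (existing != null) {
        //       HashMap<String, Object> hit = new HashMap<>();
        //       hit.put("skipUpload", true);
        //       return ResultResponse.SUCCESS(hit);
        //   }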

        //--3 cache the file's global info in Redis
        String fileInfoKey = "FILE_INFO_KEY_" + identifier;
        redisTemplate.opsForHash().put(fileInfoKey, "chunkNumber", chunkNumber);
        redisTemplate.opsForHash().put(fileInfoKey, "chunkSize", chunkSize);
        redisTemplate.opsForHash().put(fileInfoKey, "currentChunkSize", currentChunkSize);
        redisTemplate.opsForHash().put(fileInfoKey, "totalSize", totalSize);
        redisTemplate.opsForHash().put(fileInfoKey, "identifier", identifier);
        redisTemplate.opsForHash().put(fileInfoKey, "filename", filename);
        redisTemplate.opsForHash().put(fileInfoKey, "relativePath", relativePath);
        redisTemplate.opsForHash().put(fileInfoKey, "totalChunks", totalChunks);

        //--4 build the response
        HashMap<String, Object> map = new HashMap<>();
        map.put("skipUpload", false);

        return ResultResponse.SUCCESS(map);
    }

    /**
     * Receives one uploaded file chunk
     * @param request
     * @return
     */
    @PostMapping(value = "/upload")
    public ResultResponse uploadChunk(HttpServletRequest request) throws IOException {

        String chunkNumber = request.getParameter("chunkNumber");

        log.info("chunkNumber={}", chunkNumber);

        String chunkSize = request.getParameter("chunkSize");
        String currentChunkSize = request.getParameter("currentChunkSize");
        String totalSize = request.getParameter("totalSize");
        String identifier = request.getParameter("identifier");
        String filename = request.getParameter("filename");
        String relativePath = request.getParameter("relativePath");
        String totalChunks = request.getParameter("totalChunks");

        MultipartHttpServletRequest multipartHttpServletRequest = (MultipartHttpServletRequest)request;
        MultipartFile file = multipartHttpServletRequest.getFile("file");

        //--4 store the uploaded chunk on disk
        String fileInfoKey = "FILE_INFO_KEY_" + identifier;
        String storeFolder = "D:\\temp\\upload";
        String tempFolder = storeFolder + File.separator + fileInfoKey;
        // make sure the directory exists
        File tempFile = new File(tempFolder);
        if(!tempFile.exists()) {
            tempFile.mkdirs();
        }
        // write the chunk to a uniquely named temp file
        String fileName = tempFolder + File.separator + UUID.randomUUID().toString();
        log.info("fileName="+fileName);

        File chunkFile = new File(fileName);
        file.transferTo(chunkFile);


        //--5 record each chunk's path in Redis
        String storeFileKey = "FILE_STORE_KEY_" + identifier;
        log.info("storeFileKey="+storeFileKey);
        redisTemplate.opsForHash().put(storeFileKey, chunkNumber, fileName);

        return ResultResponse.SUCCESS();
    }


    /**
     * Merges the uploaded chunks into the final file
     * @param mergeFileVueVo
     * @return
     */
    @PostMapping(value = "/merge")
    public ResultResponse uploadMerge(@RequestBody MergeFileVueVo mergeFileVueVo) throws Exception {

        log.info("vo="+mergeFileVueVo.toString());

        //--1 verify that every chunk was actually uploaded
        String fileInfoKey = "FILE_INFO_KEY_" + mergeFileVueVo.getIdentifier();
        String chunks = (String)redisTemplate.opsForHash().get(fileInfoKey, "totalChunks");
        String totalSize = (String)redisTemplate.opsForHash().get(fileInfoKey, "totalSize");

        String storeFileKey = "FILE_STORE_KEY_" + mergeFileVueVo.getIdentifier();
        boolean ok = true;
        for (int index = 1; index <= Integer.valueOf(chunks); index++) {
            if(!redisTemplate.opsForHash().hasKey(storeFileKey, ""+index)) {
                ok = false;
                break;
            }
        }
        if(!ok) {
            throw new Exception("Some file chunks are missing");
        }


        //--2 merge the chunks
        String storeFolder = "D:\\temp\\upload";
        String ext = mergeFileVueVo.getFilename().substring(mergeFileVueVo.getFilename().lastIndexOf(".") + 1);

        String fileName = storeFolder + File.separator + UUID.randomUUID().toString() + "." + ext;

        File resultFile = new File(fileName);

        int bufLen = 1024 * 4;
        byte[] buffer = new byte[bufLen];

        // try-with-resources so the streams are closed even if a chunk fails
        try (BufferedOutputStream bufferedOutputStream =
                     new BufferedOutputStream(new FileOutputStream(resultFile))) {
            for (int index = 1; index <= Integer.valueOf(chunks); index++) {
                String tempFile = (String) redisTemplate.opsForHash().get(storeFileKey, "" + index);

                try (BufferedInputStream bufferedInputStream =
                             new BufferedInputStream(new FileInputStream(tempFile))) {
                    int readCount;
                    while ((readCount = bufferedInputStream.read(buffer)) != -1) {
                        bufferedOutputStream.write(buffer, 0, readCount);
                    }
                }

                // delete the temp chunk file
                Files.delete(Paths.get(tempFile));
            }
        }

        // delete the (now empty) temp directory
        String tempFolder = storeFolder + File.separator + fileInfoKey;
        Files.deleteIfExists(Paths.get(tempFolder));


        //--3 clear the cached info from Redis
        redisTemplate.delete(fileInfoKey);
        redisTemplate.delete(storeFileKey);

        //--4 verify file integrity (compute and compare the MD5)
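        // One way to do this (sketch), using org.springframework.util.DigestUtils
        // from spring-core: recompute the merged file's MD5 and compare it with
        // the identifier the client computed via spark-md5.
        //
        //   try (InputStream in = new FileInputStream(resultFile)) {
        //       String serverMd5 = DigestUtils.md5DigestAsHex(in);
        //       if (!serverMd5.equals(mergeFileVueVo.getIdentifier())) {
        //           throw new Exception("MD5 mismatch: the merged file is corrupt");
        //       }
        //   }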

        //--5 persist the file's metadata to the database
        TbUploadLocalFile tbUploadLocalFile = new TbUploadLocalFile();
        tbUploadLocalFile.setFileName(mergeFileVueVo.getFilename());
        tbUploadLocalFile.setFileExt(ext);
        tbUploadLocalFile.setFileSize(Long.valueOf(totalSize));
        tbUploadLocalFile.setFileType(ext);

        String name = fileName.substring(fileName.lastIndexOf("\\")+1, fileName.length());
        tbUploadLocalFile.setStoreName(name);

        tbUploadLocalFileMapper.insert(tbUploadLocalFile);

        return ResultResponse.SUCCESS();
    }
}
  • VO object
package com.shenma2005.upload.vo;

import lombok.Data;

import java.io.Serializable;

@Data
public class MergeFileVueVo implements Serializable {
    private String filename;
    private String identifier;
    private Integer totalChunks;
}
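For completeness, the JSON body the front end posts to /file/merge (the `param` object built in fileSuccess above) binds onto this VO. The values here are illustrative:

{
    "filename": "movie.mp4",
    "identifier": "0769f5d7c8a7a8a99d9ff58a16886f6c",
    "totalChunks": 12
}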