大文件上传—— 拖拽选择文件、切片上传、断点续传、异步任务并发数控制

效果展示

(此处为原文效果展示图,导出时图片已丢失)

技术栈

vue+ts+element+egg

拖拽选择文件

<!-- Drop zone: files can be dragged onto #drag or picked via the input;
     bindEvents() attaches the dragover/dragleave/drop handlers. -->
<div ref="drag" id="drag">
      <input type="file" @change="handleChange" />
</div>
/* Drop-zone styling; the drag handlers swap border-color while dragging. */
#drag {
  height: 100px;
  line-height: 100px;
  border: 2px dashed #eee;
  text-align: center;
}
// Vue lifecycle hook: wire up the drag-and-drop listeners once
// the component's DOM (and thus $refs.drag) is available.
async mounted() {
 this.bindEvents()
}
// Attach drag-and-drop handlers to the #drag element.
// preventDefault is required on all three events, otherwise the
// browser navigates to / opens the dropped file.
bindEvents() {
  const drag: any = this.$refs.drag;
  // File dragged over the zone: highlight it.
  drag.addEventListener("dragover", (e: any) => {
    drag.style.borderColor = "red";
    e.preventDefault()
  });
  // Pointer left the zone: restore the original border color.
  // BUG FIX: the stylesheet draws the border as #eee; the original
  // restored "#ccc", so the zone never returned to its initial look.
  drag.addEventListener("dragleave", (e: any) => {
    drag.style.borderColor = "#eee";
    e.preventDefault()
  });
  // File dropped: keep the first file and reset the border.
  drag.addEventListener("drop", (e: any) => {
    const fileList: Array<any> = e.dataTransfer.files;
    this.file = fileList[0];
    drag.style.borderColor = "#eee";
    e.preventDefault()
  });
}

(此处为拖拽选择文件的演示图,导出时图片已丢失)

文件类别判断

一般上传文件,我们只是简单的通过后缀名的方式去识别,但是很容易通过篡改后缀名而越过。一般文件转为十六进制之后,相同文件的首部和尾部的几个字节是固定的,可以依靠这个去进行识别。

// Read a blob and resolve with its bytes rendered as space-separated
// uppercase hex, e.g. "89 50 4E 47". Used for magic-number checks below.
async blobToString(blob) {
 return new Promise(resolve => {
   const reader = new FileReader();
   reader.onload = function() {
     const ret = reader.result.split('')
       .map(v => v.charCodeAt()) // Unicode code point of each character (one byte here)
       .map(v => v.toString(16).toUpperCase())
       .map(v => v.padStart(2, "0")) // left-pad to two hex digits
       .join(" ")
     resolve(ret)
   };
   reader.readAsBinaryString(blob);
 });
}
//判断是否为png图片
async isPng(file) {
 const ret = await this.blobToString(file.slice(0, 8));
 const isPng = ret == "89 50 4E 47 0D 0A 1A 0A";
 return isPng;
}
//判断是否为jpg图片
async isJpg(file){
 const len = file.size
 const start = await this.blobToString(file.slice(0, 2))
 const tail = await this.blobToString(file.slice(-2, len))
 const isJpg = (start == 'FF D8') && (tail == 'FF D9')
 return isJpg
}
//通过文件流来判断文件格式是否符合
async isImage(file) {
 return await this.isPng(file) || await this.isJpg(file)
}

// Validate the selected file's real type (by magic number) before uploading.
if(!await this.isImage(this.file)){
    this.$alert("文件格式不对")
    return
}else{
    console.log("文件格式正确")
}

普通上传

不考虑文件大小,选择文件之后通过formData方式上传,可以配上进度条

<!-- Whole-file upload UI: file picker plus a single overall progress bar
     bound to the uploadProgress component property. -->
<h2>文件上传</h2>
<div ref="drag" id="drag">
  <input type="file" @change="handleChange" />
</div>
<div>上传进度条</div>
<div>
    <el-progress :stroke-width='20' :text-inside="true" :percentage="uploadProgress"></el-progress>
</div>

vue前端

// Component state: the selected file and the overall upload percentage (0-100).
file: any
uploadProgress: any = 0
// Upload the whole file as one multipart/form-data request.
const form = new FormData()
form.append('name','file')
form.append('file',this.file)
http.post('/uploadFile',form,{
  onUploadProgress:progress=>{
    // axios upload-progress event -> percentage with two decimals
    this.uploadProgress = Number(((progress.loaded/progress.total)*100).toFixed(2))
  }
})

egg后端处理FormData文件
这块可详细参考 egg 官方文档中的上传文件处理案例(原文此处的链接在导出时已丢失)。

const fs = require('fs')
const path = require('path')
const pump = require('mz-modules/pump')
// egg controller action: receive a whole-file upload and stream it to app/public.
async uploadFile(){
	// whole-file upload: getFileStream reads the single multipart file
    const stream = await this.ctx.getFileStream();
    // stored name = the "name" form field + the original extension, lower-cased
    const filename = encodeURIComponent(stream.fields.name) + path.extname(stream.filename).toLowerCase();
    const target = path.join(this.config.baseDir, 'app/public', filename);
    const writeStream = fs.createWriteStream(target);
    await pump(stream,writeStream)
    this.message("上传成功")
}

文件过大之后,采用整体上传的方式是极不友好的,很慢而且容易失败,失败之后还需要重新来过,故而有了下面几种方式去解决。

切片上传

文件切片处理
切片,即将大文件切割为多个小块

const CHUNK_SIZE = 50*1024//小块代销
//切片处理
createFileChunk(file,size=CHUNK_SIZE){
  const chunks = []
  let cur = 0
  while(cur<file.size){
    chunks.push({index:cur,file:file.slice(cur,cur+size)})
    cur+=size
  }
  return chunks
}

切片完成之后,我们需要给这些切片起上名称,顺序标记好,否则后端在切片合成的时候极容易乱掉。这块将使用文件的(md5加密hash值+切片顺序)作为名称。
大文件的hash计算也是一项费时的操作,这儿提供了三种方式去解决。
webWorker方式
利用js的新特性,重新启动一个线程去计算hash,则不会影响主线程。不过在vue项目中,新建的worker需要放置于静态文件目录下。
webWorker用法
(此处为 webWorker 用法示意图,导出时图片已丢失)
主线程

// Compute the file hash in a Web Worker so the main thread stays responsive.
async calculateHashWorker(){
 return new Promise(resolve => {
   // hash.js must live under the static dir so the Worker can fetch it
   const worker = new Worker('/hash.js')
   worker.postMessage({chunks:this.chunks})
   worker.onmessage = e => {
     // the worker posts {progress} while hashing, {progress, hash} when done
     const {progress,hash} = e.data
     this.hashProgress = Number(progress.toFixed(2))
     if(hash){
       resolve(hash)
     }
   }
 })
}

hash.js子线程

// Load spark-md5 inside the worker so MD5 hashing never blocks the UI thread.
self.importScripts('spark-md5.min.js')
self.onmessage = e => {
    // payload posted by the main thread: the file slices
    const {chunks} = e.data
    const spark = new self.SparkMD5.ArrayBuffer()
    let progress = 0
    let count = 0
    // Read slice `index`, fold it into the hash, then continue until done.
    const loadNext = index => {
        const reader = new FileReader()
        reader.readAsArrayBuffer(chunks[index].file)
        reader.onload = e => {
            count ++
            spark.append(e.target.result)
            if(count==chunks.length){
                // all slices hashed: report 100% plus the final digest
                self.postMessage({
                    progress:100,
                    hash:spark.end()
                })
            }else{
                // report incremental progress, then read the next slice
                progress += 100/chunks.length
                self.postMessage({
                    progress
                })
                loadNext(count)
            }
        }
    }
    loadNext(0)
}

requestIdleCallback方式
借鉴react的fiber架构(利用浏览器帧与帧之间的空闲时间)计算hash
但是这块可能会阻塞到其他函数内部回调函数的执行,打乱执行顺序,具体看这儿

import sparkMD5 from 'spark-md5'
async calculateHashIdle(){
  const chunks = this.chunks
  return new Promise(resolve => {
    const spark = new sparkMD5.ArrayBuffer()
    let count = 0
    const appendToSpark = async file=>{
      return new Promise(resolve=>{
        const reader = new FileReader()
        reader.readAsArrayBuffer(file)
        reader.onload = e=>{
          spark.append(e.target.result)
          resolve()
        }
      })
    }
    const workLoop = async deadline=>{
      //timeRemaining获取当前帧的剩余时间
      while(count<chunks.length && deadline.timeRemaining()>1){
        //空闲时间,且有任务
        await appendToSpark(chunks[count].file)
        count++
        if(count<chunks.length){
          this.hashProgress = Number(
            ((100*count)/chunks.length).toFixed(2)
          )
        }else{
          this.hashProgress = 100
          resolve(spark.end())
        }
      }
      window.requestIdleCallback(workLoop)
    }

    window.requestIdleCallback(workLoop)
  })
}

抽样Hash
首尾全要,中间取部分去计算hash。这儿遇到的问题是有可能不同文件hash相同,比如有些图片仅有几像素内容不同

import sparkMD5 from 'spark-md5'
// Sampled hash: hash the whole first and last 2 MB regions, but only
// 2 bytes from the front/middle/end of every region in between.
// Much faster, at the cost of possible collisions for near-identical files.
async calculateHashSample(){
  return new Promise(resolve=>{
    const spark = new sparkMD5.ArrayBuffer()
    const reader = new FileReader()
    const file = this.file
    const size = file.size
    const offset = 2*1024*1024
    // keep the entire first 2 MB and the entire last region
    const chunks = [file.slice(0,offset)]
    let cur = offset
    while(cur<size){
      if(cur+offset>=size){
        // last region: keep it all
        chunks.push(file.slice(cur,cur+offset))
      }else{
        // middle region: sample 2 bytes from its front, middle and end
        const mid = cur+offset/2
        const end = cur+offset
        chunks.push(file.slice(cur, cur+2))
        chunks.push(file.slice(mid, mid+2))
        chunks.push(file.slice(end-2,end))
      }
      cur+=offset
    }
    // hash the concatenated samples in one pass
    reader.readAsArrayBuffer(new Blob(chunks))
    reader.onload = e=>{
      spark.append(e.target.result)
      this.hashProgress = 100
      resolve(spark.end())
    }
  })
}

切片文件上传

<template>
  <div class="upload">
    <!-- BUG FIX: "hasn" was a typo for "hash" in the visible label -->
    <div>计算hash进度条</div>
    <div>
        <el-progress :stroke-width='20' :text-inside="true" :percentage="hashProgress"></el-progress>
    </div>
    <el-button type="primary" @click="uploadFile">上传</el-button>
    <div>切片上传进度条</div>
    <!-- chunk.progress drives each cube:
      < 0    -> error, cube shown red
      == 100 -> success, cube shown green
      other  -> cube fill height equals the percentage -->
      <!-- keep the cube grid roughly square:
        10 chunks -> 4x4, 9 -> 3x3, 100 -> 10x10 -->
    <div v-if="chunks" class="cube-container" :style="{width:cubeWidth+'px'}">
      <div class="cube" v-for="chunk in chunks" :key="chunk.name">
        <div 
          :class="{
            'uploading':chunk.progress>0&&chunk.progress<100,
            'success':chunk.progress==100,
            'error':chunk.progress<0
          }"
          :style="{height:chunk.progress+'%'}"
        >
          <i class="el-icon-loading" style="color:#f56c6c" v-if="chunk.progress<100&&chunk.progress>0"></i>
        </div>
      </div>
    </div>
  </div>
</template>
<style lang="scss">
/* Per-slice progress cubes: one 14px cube per chunk; the inner div's
   height follows chunk.progress and its class encodes the state. */
.cube-container{
  .cube{
    width:14px;
    height:14px;
    line-height:12px;
    border:1px solid #000;
    background: #eee;
    float: left;
    .success{
      background: green;
    }
    .uploading{
      background: blue;
    }
    .error{
      background: red;
    }
  }
}
</style>
//上传整体进度
get uploadProgress(){
    if(!this.file){
        return 0
    }
    const loaded = this.chunks.map(item=>item.chunk.size*item.progress)
                              .reduce((acc,cur)=>acc+cur,0)
    return parseInt(((loaded*100)/this.file.size).toFixed(2))
}
// Slice the file, hash it, then upload every slice.
async uploadFile() {
    const chunks = this.createFileChunk(this.file)
    // alternative hash strategies kept for reference:
    // const hash = await this.calculateHashWorker()
    // const hash = await this.calculateHashIdle()
    const hash = await this.calculateHashSample()
    this.hash = hash
    // name each slice "<hash>-<index>" so the server can order them when merging
    this.chunks = chunks.map((chunk,index)=>{
        const name = hash+'-'+index
        return {
            hash,
            name,
            index,
            chunk:chunk.file
        }
    })
    await this.uploadChunks()
  }
  // Upload all slices in parallel (Promise.all version).
  async uploadChunks(){
    const requests = this.chunks
      .map((chunk,index)=>{
        const form = new FormData()
        form.append('chunk',chunk.chunk)
        form.append('hash',chunk.hash)
        form.append('name',chunk.name)
        return {form,index:chunk.index,error:0}
      })
      .map(({form,index})=>
     	   http.post('/uploadFile',form,{
           onUploadProgress:progress=>{
             // each slice keeps its own progress; the overall bar is computed from these
             this.chunks[index].progress = Number(((progress.loaded/progress.total)*100).toFixed(2))
           }
         }))
    // fire all requests at once
    await Promise.all(requests)
    // every slice is up: ask the server to merge them into the final file
    await this.mergeRequest()
  }
  //切片合并
  async mergeRequest(){
    const ret = await http.post('/mergeFile',{
      ext:this.file.name.split('.').pop(),
      size:CHUNK_SIZE,
      hash:this.hash
    })
  }

egg后端处理

const fs = require('fs')
const fse = require('fs-extra')
const path = require('path')
// egg controller action: store one uploaded slice under a folder named by the file hash.
async uploadFile(){
    const {ctx} = this
    // slice upload: the multipart file plus its hash and slice name
    const file = ctx.request.files[0]
    const {hash,name} = ctx.request.body
    /*
    config.default.js
    // upload storage directory
  	config.UPLOAD_DIR = path.resolve(__dirname,'..','app/public')
    */
    const chunkPath = path.resolve(this.config.UPLOAD_DIR,hash)
    if(!fse.existsSync(chunkPath)){
        // first slice of this file: create its folder
        await fse.mkdir(chunkPath)
    }
    // move the temp upload into <UPLOAD_DIR>/<hash>/<name>
    await fse.move(file.filepath,`${chunkPath}/${name}`)
    this.message('切片上传成功')
}
//整合切片
async mergeFile(){
  const {ext,size,hash} = this.ctx.request.body
   const filePath = path.resolve(this.config.UPLOAD_DIR,`${hash}.${ext}`)
   await mergeFileCont(filePath,hash,size)
   this.success({
       url:`/public/${hash}.${ext}`
   })
}
// Collect, order and merge the slice files stored under <UPLOAD_DIR>/<hash>.
async mergeFileCont(filePath,fileHash,size){
	// the folder under public/ that holds this file's slices
   const chunkDir = path.resolve(this.config.UPLOAD_DIR,fileHash)
    let chunks = await fse.readdir(chunkDir)
    // names are "<hash>-<index>"; sort numerically by the index part
    chunks.sort((a,b)=>a.split('-')[1] - b.split('-')[1])
    chunks = chunks.map(cp => path.resolve(chunkDir, cp))
    await this.mergeChunks(chunks, filePath, size)
}
async mergeChunks(files, dest, size){
	//将切片流生成新的图片
    const pipStream = (filePath, writeStream) => new Promise(resolve => {
        const readStream = fse.createReadStream(filePath)
        readStream.on('end', () => {
            fse.unlinkSync(filePath)
            resolve()
        })
        readStream.pipe(writeStream)
    })
    await Promise.all(
        files.forEach((file,index) => {
            pipStream(file, fse.createWriteStream(dest, {
                start: index * size,
                end: (index + 1) * size
            }))
        })
    )
}

异步任务并发控制

上面切片上传采用promise.all的方式将所有异步任务同时触发,请求过多可能会造成页面假死、阻塞流程、报错等问题。
这儿限定每次请求只能进行固定次数的异步任务,每请求成功一个则新增一个,确保并发的数量固定。此外,针对可能的报错失败重试三次的操作,若重试三次都报错,则流程终止。

//上传切片
async uploadChunks(){
  const requests = this.chunks
    .map((chunk,index)=>{
      const form = new FormData()
      form.append('chunk',chunk.chunk)
      form.append('hash',chunk.hash)
      form.append('name',chunk.name)
      return {form,index:chunk.index,error:0}
    })
  //发起批量请求
  await sendRequest(requests)
  //所有切片上传完毕,通知后台进行切片内容合并,生成完整图片
  await this.mergeRequest()
}
async sendRequest(chunks){
 return new Promise((resolve,reject)=>{
   const len = chunks.length
   //限制每次最多只能同时发起4次请求
   let limit = len > 4 ? 4 : len
   let counter = 0
   let isStop = false
   const start = async () => {
     if(isStop) return
     const task = chunks.shift()
     if(task){
       const {form,index} = task
       try{
         await http.post('/uploadFile',form,{
           onUploadProgress:progress=>{
             this.chunks[index].progress = (((progress.loaded/progress.total)*100).toFixed(2))
           }
         })
         if(counter==len-1){
           resolve()
         }else{
           counter++
           //启动下一个任务
           start()
         }
       }catch(e){
         this.chunks[index].progress = -1
         if(task.error<3){
           task.error++
           chunks.unshift(task)
           start()
         }else{
           //错误三次
           isStop = true
           reject()
         }
       }
     }
   }
   while(limit>0){
     //启动limit个任务
     //模拟下延迟任务
     setTimeout(()=>{
       start()
     },Math.random()*2000)
     limit-=1
   }
 })
}

断点续传

大文件上传,中间如因网络或其他原因使得上传出错,重新去上传是一个很麻烦的事情。在上文切片上传的基础上,中间出错但是之前的切片内容已保存到后台,这时可以通过发起判断,后台返回已上传的切片列表。再次上传的时候,只需要过滤掉已经上传过的,只传未上传的就好。

// Resumable upload: ask the server which slices already exist, mark those
// as 100%, and only upload the rest.
async uploadFile() {
	// resume check: does the merged file or any slice already exist?
    const ret = await http.post('/checkFile',{
      hash:this.hash,
      ext:this.file.name.split('.').pop()
    })
    // uploaded: the merged file already exists on the server
    // uploadedList: names of slices already received
    const {uploaded,uploadedList} = ret.data.data
    // NOTE(review): `chunks` and `hash` are not defined in this excerpt —
    // in the full method they come from createFileChunk / the hash step
    // shown earlier in the article; confirm before copying verbatim.
    this.chunks = chunks.map((chunk,index)=>{
        const name = hash+'-'+index
        return {
            hash,
            name,
            index,
            chunk:chunk.file,
            // already-uploaded slices show as complete immediately
            progress:uploadedList.indexOf(name)>-1?100:0
        }
    })
    await this.uploadChunks(uploadedList)
}
// Upload only the slices the server does not already have.
  async uploadChunks(uploadedList=[]){
    const requests = this.chunks
      // skip slices that are already stored on the server
      .filter(chunk=>uploadedList.indexOf(chunk.name)==-1)
      .map((chunk,index)=>{
        const form = new FormData()
        form.append('chunk',chunk.chunk)
        form.append('hash',chunk.hash)
        form.append('name',chunk.name)
        return {form,index:chunk.index,error:0}
      })
    await this.sendRequest(requests)
    await this.mergeRequest()
  }

egg后台处理

const fs = require('fs')
const fse = require('fs-extra')
const path = require('path')
// Resume / instant-upload check: report whether the merged file exists,
// and if not, which slices have already been received.
async checkFile(){
     const {ext,hash} = this.ctx.request.body
     const filePath = path.resolve(this.config.UPLOAD_DIR,`${hash}.${ext}`)
     let uploaded = false
     let uploadedList = []
     if(fse.existsSync(filePath)){
         // the merged file already exists: instant upload
         uploaded = true
     }else{
         // otherwise list the slices already stored for this hash
         uploadedList = await this.getUploadedList(path.resolve(this.config.UPLOAD_DIR,hash))
     }
     this.success({
         uploaded,
         uploadedList
     })
 }
 async getUploadedList(dirPath){
     return fse.existsSync(dirPath)
         ?(await fse.readdir(dirPath)).filter(name=>name[0]!=='.')
         :[]
 }

秒传

即在上传之前,先调取后端接口,判断是否文件已存在,若存在直接提示秒传成功。

// Instant upload ("秒传"): if the server already has a file with this
// hash, skip uploading entirely.
async uploadFile() {
	// ask the server whether the file (by hash) or its slices already exist
    const ret = await http.post('/checkFile',{
      hash:this.hash,
      ext:this.file.name.split('.').pop()
    })
    // uploaded: the merged file already exists on the server
    // uploadedList: slices already received (unused in this branch)
    const {uploaded,uploadedList} = ret.data.data
    if(uploaded){
      return this.$alert('秒传成功')
    }
}

版权声明:本文为weixin_40970987原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接和本声明。