When uploading a file, a request for a very large file can easily time out, so large files need to be uploaded in chunks. And if the network is unreliable, how do you resume an interrupted upload? You also need to record which chunks of the current file have been uploaded, and check that record the next time an upload request is made.
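Before looking at the full code, here is a minimal sketch of the chunking arithmetic the whole approach relies on (the 1 MB chunk size matches the front-end code below; the 5.5 MB file size is just an illustrative value):

// Chunking arithmetic (sizes are example values)
const chunkSize = 1 * 1024 * 1024;                  // 1 MB per chunk
const fileSize = Math.round(5.5 * 1024 * 1024);     // e.g. a 5.5 MB file
const blockCount = Math.ceil(fileSize / chunkSize); // => 6 chunks
// Chunk i covers bytes [i * chunkSize, Math.min(fileSize, (i + 1) * chunkSize))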
Code first: Code repository address
Front end
1. index.html
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <meta http-equiv="X-UA-Compatible" content="ie=edge">
  <title>File Upload</title>
  <script src="https://cdn.bootcss.com/axios/0.18.0/axios.min.js"></script>
  <script src="https://code.jquery.com/jquery-3.4.1.js"></script>
  <script src="./spark-md5.min.js"></script>
  <script>
    $(document).ready(() => {
      const chunkSize = 1 * 1024 * 1024; // Size of each chunk, set to 1 MB
      // Use the Blob.slice method to split the file.
      // This method is vendor-prefixed in some older browsers.
      const blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice;

      const hashFile = (file) => {
        return new Promise((resolve, reject) => {
          const chunks = Math.ceil(file.size / chunkSize);
          let currentChunk = 0;
          const spark = new SparkMD5.ArrayBuffer();
          const fileReader = new FileReader();
          function loadNext() {
            const start = currentChunk * chunkSize;
            const end = start + chunkSize >= file.size ? file.size : start + chunkSize;
            fileReader.readAsArrayBuffer(blobSlice.call(file, start, end));
          }
          fileReader.onload = e => {
            spark.append(e.target.result); // Append array buffer
            currentChunk += 1;
            if (currentChunk < chunks) {
              loadNext();
            } else {
              console.log('finished loading');
              const result = spark.end();
              // If the content hash alone were used, two files with identical
              // content but different names could not both be kept,
              // so the file name is appended to the hash input as well.
              const sparkMd5 = new SparkMD5();
              sparkMd5.append(result);
              sparkMd5.append(file.name);
              const hexHash = sparkMd5.end();
              resolve(hexHash);
            }
          };
          fileReader.onerror = () => {
            console.warn('File reading failed!');
          };
          loadNext();
        }).catch(err => {
          console.log(err);
        });
      }

      const submitBtn = $('#submitBtn');
      submitBtn.on('click', async () => {
        const fileDom = $('#file')[0];
        // files is a FileList; with multiple selection enabled it can hold several File objects
        const files = fileDom.files;
        const file = files[0];
        if (!file) {
          alert('No file was obtained');
          return;
        }
        const blockCount = Math.ceil(file.size / chunkSize); // Total number of chunks
        const axiosPromiseArray = []; // Array of axios promises
        const hash = await hashFile(file); // File hash
        // With the file hash available, a resumable upload could check on the server,
        // by hash, whether the file has already been uploaded, whether the transfer
        // is complete, and which chunks have already arrived.
        console.log(hash);
        for (let i = 0; i < blockCount; i++) {
          const start = i * chunkSize;
          const end = Math.min(file.size, start + chunkSize);
          // Build a form
          const form = new FormData();
          form.append('file', blobSlice.call(file, start, end));
          form.append('name', file.name);
          form.append('total', blockCount);
          form.append('index', i);
          form.append('size', file.size);
          form.append('hash', hash);
          // Submit one chunk via ajax; the content-type is multipart/form-data
          const axiosOptions = {
            onUploadProgress: e => {
              // Handle upload progress here
              console.log(blockCount, i, e, file);
            },
          };
          // Add to the promise array
          axiosPromiseArray.push(axios.post('/file/upload', form, axiosOptions));
        }
        // After all chunks are uploaded, ask the server to merge them
        await axios.all(axiosPromiseArray).then(() => {
          // Merge chunks
          const data = {
            size: file.size,
            name: file.name,
            total: blockCount,
            hash
          };
          axios.post('/file/merge_chunks', data)
            .then(res => {
              console.log('Upload successful');
              console.log(res.data, file);
              alert('Upload successful');
            })
            .catch(err => {
              console.log(err);
            });
        });
      });
    })
    window.onload = () => { }
  </script>
</head>
<body>
  <h1>Large File Upload Test</h1>
  <section>
    <h3>Custom Upload File</h3>
    <input id="file" type="file" name="avatar"/>
    <div>
      <input id="submitBtn" type="button" value="Submit">
    </div>
  </section>
</body>
</html>
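One caveat worth noting: the loop above fires every chunk request at once, which can overwhelm the browser or server for very large files. Below is a minimal sketch of throttling the requests with a small concurrency pool; the uploadChunk(i) helper is hypothetical and stands in for the axios.post('/file/upload', form, axiosOptions) call above:

// Run at most `limit` chunk uploads at a time; uploadChunk(i) is assumed
// to return the axios promise for chunk i.
async function uploadAll(blockCount, uploadChunk, limit = 3) {
  const queue = Array.from({ length: blockCount }, (_, i) => i);
  const workers = Array.from({ length: limit }, async () => {
    while (queue.length > 0) {
      const i = queue.shift(); // take the next pending chunk index
      await uploadChunk(i);    // each worker uploads its chunks sequentially
    }
  });
  await Promise.all(workers);  // resolves once every chunk is uploaded
}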
2. Dependency file: spark-md5.min.js
Back end
1. app.js
const Koa = require('koa');
const app = new Koa();
const Router = require('koa-router');
const multer = require('koa-multer');
const serve = require('koa-static');
const path = require('path');
const fs = require('fs-extra');
const koaBody = require('koa-body');
const { mkdirsSync } = require('./utils/dir');

const uploadPath = path.join(__dirname, 'uploads');
const uploadTempPath = path.join(uploadPath, 'temp');
const upload = multer({ dest: uploadTempPath });
const router = new Router();

app.use(koaBody());

/**
 * single(fieldname)
 * Accept a single file with the name fieldname. The single file will be stored in req.file.
 */
router.post('/file/upload', upload.single('file'), async (ctx, next) => {
  console.log('file upload...');
  // Create a folder named after the file hash and move the uploaded chunk
  // from the temp directory into it, which makes the later merge easier.
  const { name, total, index, size, hash } = ctx.req.body;
  const chunksPath = path.join(uploadPath, hash, '/');
  if (!fs.existsSync(chunksPath)) mkdirsSync(chunksPath);
  fs.renameSync(ctx.req.file.path, chunksPath + hash + '-' + index);
  ctx.status = 200;
  ctx.res.end('Success');
});

router.post('/file/merge_chunks', async (ctx, next) => {
  const { size, name, total, hash } = ctx.request.body;
  // Locate the chunk files by hash, create the target file, and merge.
  const chunksPath = path.join(uploadPath, hash, '/');
  const filePath = path.join(uploadPath, name);
  // Read the names of all stored chunk files into an array
  const chunks = fs.readdirSync(chunksPath);
  if (chunks.length !== total || chunks.length === 0) {
    ctx.status = 200;
    ctx.res.end('Number of chunk files does not match');
    return;
  }
  // Create the target file
  fs.writeFileSync(filePath, '');
  for (let i = 0; i < total; i++) {
    // Append each chunk to the target file
    fs.appendFileSync(filePath, fs.readFileSync(chunksPath + hash + '-' + i));
    // Delete the chunk once it has been used
    fs.unlinkSync(chunksPath + hash + '-' + i);
  }
  fs.rmdirSync(chunksPath);
  // The merge succeeded; file metadata could now be stored in a database.
  ctx.status = 200;
  ctx.res.end('Merge succeeded');
});

app.use(router.routes());
app.use(router.allowedMethods());
app.use(serve(__dirname + '/static'));

app.listen(9000);
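A side note on the merge: fs.appendFileSync with fs.readFileSync buffers each chunk fully in memory, which is fine for modest files but wasteful for very large ones. A stream-based alternative could look like the following sketch (same paths and chunk naming as above; this helper is not part of the repository code):

// Stream each chunk into the target file instead of buffering it in memory
const mergeWithStreams = (chunksPath, filePath, hash, total) => {
  return new Promise((resolve, reject) => {
    const writeStream = fs.createWriteStream(filePath);
    let i = 0;
    const pipeNext = () => {
      if (i >= total) {
        writeStream.end();
        return resolve();
      }
      const readStream = fs.createReadStream(chunksPath + hash + '-' + i);
      i += 1;
      readStream.pipe(writeStream, { end: false }); // keep the target open between chunks
      readStream.on('end', pipeNext);
      readStream.on('error', reject);
    };
    pipeNext();
  });
};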
2. utils/dir.js
const path = require('path');
const fs = require('fs-extra');

// Recursively create a directory (like mkdir -p)
const mkdirsSync = (dirname) => {
  if (fs.existsSync(dirname)) {
    return true;
  } else {
    if (mkdirsSync(path.dirname(dirname))) {
      fs.mkdirSync(dirname);
      return true;
    }
  }
};

module.exports = { mkdirsSync };
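On Node.js 10.12+ (or with fs-extra, which is already a dependency here), the recursive helper above can be replaced with one-line equivalents:

// Equivalent one-liners to mkdirsSync(dirname)
fs.mkdirSync(dirname, { recursive: true }); // built into Node >= 10.12
fs.ensureDirSync(dirname);                  // fs-extra helper with the same effect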
Operation steps
Server setup
All of the following assumes that Node.js and npm are already installed; for installing and using Node.js, see the official website.
- Create a new project folder: file-upload
- Initialize the project with npm: cd file-upload && npm init
- Install the dependencies:

  npm i koa
  npm i koa-router --save // Koa routing
  npm i koa-multer --save // file upload handling
  npm i koa-static --save // Koa static resource serving
  npm i fs-extra --save   // file system helpers
  npm i koa-body --save   // request body parsing
- Create the project structure:

  file-upload
    - static
      - index.html
      - spark-md5.min.js
    - uploads
      - temp
    - utils
      - dir.js
    - app.js
- Copy the code above into the corresponding files
- Start the project: node app.js (the service can also be managed with nodemon)
- Visit: http://localhost:9000/index.html
The code is annotated with comments throughout, so you can follow the details at a glance.
Follow-up extensions: resumable (breakpoint) upload, and batch upload of multiple files.
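As a rough sketch of the resumable-upload extension, the server could expose a status route that reports which chunk indices already exist for a given hash, and the client would skip those before uploading. The /file/check route and its response shape below are hypothetical, not part of the repository code:

// Hypothetical status route in app.js: report uploaded chunk indices for a hash
router.post('/file/check', async (ctx) => {
  const { hash } = ctx.request.body;
  const chunksPath = path.join(uploadPath, hash, '/');
  const uploaded = fs.existsSync(chunksPath)
    ? fs.readdirSync(chunksPath).map(name => Number(name.split('-').pop()))
    : [];
  ctx.body = { uploaded }; // e.g. { uploaded: [0, 1, 4] }
});

// On the client, before the upload loop:
//   const { data } = await axios.post('/file/check', { hash });
//   const uploadedSet = new Set(data.uploaded);
// ...and inside the loop: if (uploadedSet.has(i)) continue;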