Implementing breakpoint resume (resumable upload) in Node.js

Solution Analysis

slice

  • Split the uploaded video into slices. The specific operation is:
  • file.slice(start, end): returns a new Blob object (see the sketch after this list)
    • start: the byte offset at which copying begins (inclusive)
    • end: the byte offset at which copying stops (exclusive)
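
For illustration, a minimal sketch (not part of the demo below) that splits a selected file into 100 blobs:

// Minimal sketch: split a selected File into 100 slices
const file = document.querySelector("input[type=file]").files[0];
const sectionLength = 100; // number of slices, matching the demo below
const chunkSize = Math.ceil(file.size / sectionLength);
const chunks = [];
for (let start = 0; start < file.size; start += chunkSize) {
  // slice(start, end) copies bytes [start, end) into a new Blob
  chunks.push(file.slice(start, start + chunkSize));
}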

Resumable upload

  • Before uploading, request a server interface that returns how many slices of the same file have already been uploaded (a sketch follows this list)
  • If the file is new, the server returns 0; otherwise it returns the number of slices already uploaded, and the client resumes from there
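
Taken in isolation, the handshake looks roughly like this (a sketch reusing the getSize endpoint and response shape from the demo below):

// Sketch: ask the server how many slices it already has
const res = await axios.post("http://localhost:8080/getSize", {
  fileName: file.name,
});
const uploaded = res.data.count; // 0 for a new file
// Only slices [uploaded, total) still need to be sent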

Specific implementation

This demo covers the key ideas and methods. Extensions such as file-type restrictions, duplicate-file detection via lastModifiedDate, and periodic clearing of cached slice files can be added on top of this code.

html

<input class="video" type="file" />
<button type="submit" onclick="handleVideo(event, '.video', 'video')">
    Submit</button>

script

let count = 0; // Records how many slices of the file are already on the server

const handleVideo = async (event, name, url) => {
  // Prevent the browser's default form submission
  event.preventDefault();
  let currentSize = document.querySelector("h2");
  let files = document.querySelector(name).files;
  // Default number of slices
  const sectionLength = 100;
  // First ask the server whether the file already exists there.
  // A count of 0 means a first upload; otherwise the server returns
  // the number of slices it has already stored.
  count = await handleCancel(files[0]);

  // Array holding the slice blobs
  let fileCurrent = [];
  // Loop over the FileList
  for (const file of [...files]) {
    // Size of each slice
    let itemSize = Math.ceil(file.size / sectionLength);
    // Walk through the file and store each blob slice in the array
    let current = 0;
    for (current; current < file.size; current += itemSize) {
      fileCurrent.push({ file: file.slice(current, current + itemSize) });
    }
    // axios token used to simulate a manual cancellation
    const CancelToken = axios.CancelToken;
    const source = CancelToken.source();
    // When resuming, drop the slices the server already has so they
    // are not uploaded again
    fileCurrent =
      count === 0 ? fileCurrent : fileCurrent.slice(count, sectionLength);
    // Request the upload interface once per remaining slice
    for (const [index, item] of fileCurrent.entries()) {
      // Simulate a paused request / dropped connection
      if (index > 90) {
        source.cancel("Cancel request");
      }
      // Collect the slice metadata:
      // file     - the slice blob
      // filename - the original file name
      // index    - the current slice number (1-based, offset by count)
      // total    - the total number of slices
      let formData = new FormData();
      formData.append("file", item.file);
      formData.append("filename", file.name);
      formData.append("total", sectionLength);
      formData.append("index", index + count + 1);

      await axios({
        url: `http://localhost:8080/${url}`,
        method: "POST",
        data: formData,
        cancelToken: source.token,
      })
        .then((response) => {
          // Use the returned slice number to display progress
          currentSize.innerHTML = `progress: ${response.data.size}%`;
        })
        .catch((err) => {
          console.log(err);
        });
    }
  }
};

// Ask the server whether the file has already been (partially) uploaded.
// A count of 0 means it does not exist; otherwise count slices are
// already stored on the server.
const handleCancel = (file) => {
  return axios({
    method: "post",
    url: "http://localhost:8080/getSize",
    headers: { "Content-Type": "application/json; charset=utf-8" },
    data: {
      fileName: file.name,
    },
  })
    .then((res) => {
      return res.data.count;
    })
    .catch((err) => {
      console.log(err);
    });
};
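
A side note: axios deprecated CancelToken in v0.22.0. On newer axios versions the same manual-cancellation trick can be written with the standard AbortController instead (a sketch, not the original demo code):

// Sketch: cancel a request with AbortController instead of CancelToken
const controller = new AbortController();
await axios({
  url: `http://localhost:8080/${url}`,
  method: "POST",
  data: formData,
  signal: controller.signal, // supported since axios 0.22
});
// Elsewhere, e.g. to simulate a dropped connection:
controller.abort();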

Node server

// Use express to build the server api
const express = require("express");
const bodyParser = require("body-parser");
// Logic for handling uploaded files
const upload = require("./upload_file");

const app = express();

// Handle all requests and set the cross-origin headers
app.all("*", (req, res, next) => {
  res.header("Access-Control-Allow-Origin", "*");
  res.header("Access-Control-Allow-Methods", "PUT,POST,GET,DELETE,OPTIONS");
  res.header("Access-Control-Allow-Headers", "Content-Type, X-Requested-With");
  res.header("X-Powered-By", "3.2.1");
  res.header("Content-Type", "application/json;charset=utf-8");
  next();
});

// Note: this matcher only applies to suffix types such as
// application/vnd+json, so plain application/json requests pass
// through untouched and getSize can read the raw body itself
app.use(bodyParser.json({ type: "application/*+json" }));
// Query the number of slices already uploaded
app.post("/getSize", upload.getSize);
// Video upload interface
app.post("/video", upload.video);

// Listen on a local port
app.listen(8080);

upload_file

// File upload module
// (note: this demo uses the formidable v1 API, where the uploaded
// file exposes .path; v2+ renamed it to .filepath)
const formidable = require("formidable");
// File system module
const fs = require("fs");
// System path module
const path = require("path");

// Write one slice file into the target stream
const handleStream = (item, writeStream) => {
  // Read the slice file into a buffer
  const readFile = fs.readFileSync(item);
  // Write the buffer (chunk) to the stream
  writeStream.write(readFile);
  // Once written, remove the temporarily stored slice file
  fs.unlink(item, () => {});
};

// Video upload (slice)
module.exports.video = (req, res) => {
  // Create the parser object
  const form = new formidable.IncomingForm();
  // Directory where uploaded slices are stored temporarily
  let dirPath = path.join(__dirname, "video");
  form.uploadDir = dirPath;
  // Keep the uploaded file's extension
  form.keepExtensions = true;
  // err contains error information if parsing fails
  // fields contains the non-binary formData key-value pairs
  // file contains information about the uploaded file
  form.parse(req, async (err, fields, file) => {
    // The uploaded slice blob
    let files = file.file;
    // Current slice index
    let index = fields.index;
    // Total number of slices
    let total = fields.total;
    // Original file name
    let filename = fields.filename;
    // Rename the uploaded slice and place it in the temporary directory
    let url =
      dirPath +
      "/" +
      filename.split(".")[0] +
      `_${index}.` +
      filename.split(".")[1];
    try {
      // Synchronously rename the uploaded file
      fs.renameSync(files.path, url);
      console.log(url);
      // Merge asynchronously
      setTimeout(() => {
        // Once the last slice has arrived, write all slices into one video
        // (index and total both arrive as strings, so strict equality works)
        if (index === total) {
          // Synchronously create a new directory for the complete video
          let newDir = __dirname + `/uploadFiles/${Date.now()}`;
          // Create the directory
          fs.mkdirSync(newDir);
          // Create a writable stream for the final file
          let writeStream = fs.createWriteStream(newDir + `/${filename}`);
          let fsList = [];
          // Collect the paths of all slice files
          for (let i = 0; i < total; i++) {
            const fsUrl =
              dirPath +
              "/" +
              filename.split(".")[0] +
              `_${i + 1}.` +
              filename.split(".")[1];
            fsList.push(fsUrl);
          }
          // Write each slice file into the stream in order
          for (let item of fsList) {
            handleStream(item, writeStream);
          }
          // Everything is written; close the stream
          writeStream.end();
        }
      }, 100);
    } catch (e) {
      console.log(e);
    }
    res.send({
      code: 0,
      msg: "Upload successful",
      size: index,
    });
  });
};

// Get the number of slices already uploaded for a file
module.exports.getSize = (req, res) => {
  let count = 0;
  req.setEncoding("utf8");
  // The JSON body parser skips this request, so read the raw body here
  req.on("data", function (data) {
    let name = JSON.parse(data);
    let dirPath = path.join(__dirname, "video");
    // Count the slice files that have already been uploaded
    let files = fs.readdirSync(dirPath);
    files.forEach((item, index) => {
      let url =
        name.fileName.split(".")[0] +
        `_${index + 1}.` +
        name.fileName.split(".")[1];
      if (files.includes(url)) {
        ++count;
      }
    });
    res.send({
      code: 0,
      msg: "Please continue uploading",
      count,
    });
  });
};
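
One caveat about the merge step: handleStream reads each slice with readFileSync and ignores the return value of writeStream.write, which is fine for a demo but loads whole slices into memory and ignores backpressure. A more stream-friendly variant (a sketch, not the demo's code) pipes each slice in sequence:

// Sketch: merge slice files sequentially with streams, respecting backpressure
const mergeSlices = async (fsList, writeStream) => {
  for (const item of fsList) {
    await new Promise((resolve, reject) => {
      const readStream = fs.createReadStream(item);
      readStream.on("error", reject);
      // end: false keeps writeStream open for the next slice
      readStream.pipe(writeStream, { end: false });
      readStream.on("end", () => fs.unlink(item, () => resolve()));
    });
  }
  // All slices written; close the final file
  writeStream.end();
};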

Logical Analysis

Front end

  • First request the server to check whether this is the file's first upload or whether some slices already exist
    • On a first upload, slices are uploaded starting from 0
    • If slices already exist, uploading resumes from the returned slice number
  • Loop over the slice array and upload each slice file
    • The demo simulates a manual pause by cancelling the request once the slice index exceeds 90

Server

  • Receive the queried filename, look in the temporary storage directory, and determine whether uploaded slices of the file already exist
    • If the file has never been uploaded, return 0 so slicing starts from 0
    • If slices exist, return their count
  • Receive the uploaded file slices and store them in the temporary directory
    • Compare index with total to determine whether all slices have been uploaded
    • Once the last slice arrives, create the destination directory and a writable stream for the final file
    • Collect the temporary slice paths into an array, then loop over it, reading and writing each file buffer in order
    • After everything is written, close the writable stream

Summary

The code above may need to be adapted to your specific business process; this is just one possible implementation.
I hope this article is helpful. If you spot any mistakes, please point them out.

The above code address: github.com/Surprise-li…
