
Large File Upload with Breakpoint Resume (Minimalist Demo: React + Node.js)



Overview

This article uses a React front end and a Node.js back end to implement large file upload with breakpoint resume. By slicing the file and uploading it chunk by chunk, a transfer interrupted by an unstable network can pick up where it left off instead of starting over.
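The core of the technique is small: cut the file into fixed-size chunks with Blob.slice, ask the server which chunk indexes it already has, and upload only the missing ones. Below is a minimal sketch of that loop; the /check URL matches the demo server later in this article, while uploadChunk is a caller-supplied placeholder for whatever transport you use (e.g. axios + FormData, as in the full component):

// Minimal sketch of the resume loop; uploadChunk is a hypothetical callback
const CHUNK_SIZE = 5 * 1024 * 1024;

async function uploadMissingChunks(file, fileHash, uploadChunk) {
  const totalChunks = Math.ceil(file.size / CHUNK_SIZE);

  // Ask the server which chunk indexes already exist for this file hash
  const res = await fetch('http://localhost:5000/check', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ fileName: file.name, fileHash }),
  });
  const uploadedChunks = await res.json();

  for (let i = 0; i < totalChunks; i++) {
    if (uploadedChunks.includes(i)) continue; // resume: skip chunks already on the server
    const chunk = file.slice(i * CHUNK_SIZE, (i + 1) * CHUNK_SIZE); // Blob.slice does not copy data
    await uploadChunk(chunk, i); // e.g. a POST with FormData, as shown below
  }
}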

Implementation

Front-end implementation (React)

First, install the axios and spark-md5 libraries: axios handles the HTTP requests and spark-md5 computes the file hash. Use the following command:

npm i axios spark-md5

Below is the React component that implements the chunked, resumable upload:

import axios from 'axios';
import SparkMD5 from 'spark-md5';
import { useRef, useState } from 'react';

// Define the file slice size (e.g. 5MB)
const CHUNK_SIZE = 5 * 1024 * 1024;

/** Large-file uploader component */
export default function FileUploader() {
  const [file, setFile] = useState(null); // store the file selected by the user
  const [uploadProgress, setUploadProgress] = useState(0); // upload progress
  const uploading = useRef(false); // used to prevent the uploading logic from being triggered repeatedly

  // triggered when the user selects a file
  const handleFileChange = (e) => {
    setFile(e.target.files[0]); // store the selected file into the state
    setUploadProgress(0); // reset the upload progress
  };

  // Calculate the file's unique identifier (hash)
  const calculateFileHash = async (file) => {
    return new Promise((resolve) => {
      const reader = new FileReader();
      reader.onload = (e) => {
        const spark = new SparkMD5.ArrayBuffer();
        spark.append(e.target.result); // feed the file contents to the hasher
        const hash = spark.end(); // hex MD5 digest of the whole file
        resolve(hash);
      };
      reader.readAsArrayBuffer(file);
    });
  };

  // Start the file upload
  const handleUpload = async () => {
    if (!file || uploading.current) return; // if no file is selected or an upload is in progress, return directly

    uploading.current = true; // mark the upload as in progress
    const fileHash = await calculateFileHash(file); // get the unique identifier (hash) of the file
    console.log('fileHash', fileHash);
    const totalChunks = Math.ceil(file.size / CHUNK_SIZE); // calculate the total number of file slices

    // Check which chunks have already been uploaded
    const { data: uploadedChunks } = await axios.post('http://localhost:5000/check', {
      fileName: file.name,
      fileHash,
    });

    // Upload the outstanding chunks
    for (let chunkIndex = 0; chunkIndex < totalChunks; chunkIndex++) {
      if (uploadedChunks?.includes(chunkIndex)) {
        console.log('Skip chunkIndex', chunkIndex);
        setUploadProgress(((chunkIndex + 1) / totalChunks) * 100); // update progress
        continue; // skip already uploaded chunks
      }
      console.log('Uploading chunkIndex', chunkIndex);
      // Create the current slice
      const start = chunkIndex * CHUNK_SIZE; // slice start byte
      const end = Math.min(file.size, start + CHUNK_SIZE); // slice end byte
      const chunk = file.slice(start, end); // extract the slice

      // Upload the slice
      const formData = new FormData();
      formData.append('chunk', chunk); // current slice
      formData.append('fileName', file.name); // the file name
      formData.append('fileHash', fileHash); // file unique identifier
      formData.append('chunkIndex', chunkIndex); // slice index

      await axios.post(
        `http://localhost:5000/upload?fileHash=${fileHash}&chunkIndex=${chunkIndex}&fileName=${file.name}`,
        formData,
        {
          onUploadProgress: (progressEvent) => {
            const progress =
              ((chunkIndex + progressEvent.loaded / progressEvent.total) /
                totalChunks) *
              100;
            setUploadProgress(progress); // update the upload progress in real time
          },
        }
      );
    }

    // Notify the server to merge the slices
    await axios.post('http://localhost:5000/merge', {
      fileName: file.name,
      fileHash,
      totalChunks,
    });

    alert('Uploaded successfully!');
    uploading.current = false; // mark the upload as complete
  };

  return (
    <div style={{ padding: '20px' }}>
      <p>Large file uploads (with breakpoint support)</p>
      <input type="file" onChange={handleFileChange} />
      <button onClick={handleUpload}>Upload file</button>
      <div style={{ marginTop: '20px' }}>
        <progress value={uploadProgress} max="100" />
        <div>Upload progress: {uploadProgress.toFixed(2)}%</div>
      </div>
    </div>
  );
}
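To try the component, render it from the app entry point. A minimal sketch, assuming a standard Vite or Create React App project with the component above saved as FileUploader.jsx (both file names are placeholders):

// App.jsx - hypothetical entry that mounts the uploader
import FileUploader from './FileUploader';

export default function App() {
  return <FileUploader />;
}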

Back-end implementation (Node.js + Express)

On the back end, install the following dependencies: multer, fs-extra, express, cors, and body-parser. Use the following command:

npm i multer fs-extra express cors body-parser

Note: these are runtime dependencies (not devDependencies); they are needed in both development and production.

The following is the server implementation code:

// server.js
const express = require("express");
const multer = require("multer");
const fs = require("fs-extra");
const bodyParser = require("body-parser");
const path = require("path");
const cors = require("cors");

const app = express();
const uploadDir = path.join(__dirname, "uploads"); // upload directory

// Make sure the upload directory exists
if (!fs.existsSync(uploadDir)) {
  fs.mkdirSync(uploadDir);
}

app.use(cors()); // allow cross-origin requests
app.use(bodyParser.json()); // parse JSON request bodies

// Check for uploaded slices
("/check", (req, res) => {
  const { fileHash } = ;
  ("fileHash check",fileHash)

  const fileChunkDir = (uploadDir, fileHash); // slice the storage directory
  if (! (fileChunkDir)) {
    return ([]); // if the directory does not exist, return the empty array
  }

  // Return the index of the uploaded chunks
  const uploadedChunks = (fileChunkDir).map((chunk) => {
    return parseInt((("-")[1]); // Extract the chunk indexes
  }).
  (uploadedChunks).
}).


// Set up the multer middleware to handle the file uploads.
const storage = multer.diskStorage({
  destination: (req, file, cb) => {
    const fileHash = req.query.fileHash; // get the fileHash from the query parameters
    const chunkDir = path.join(uploadDir, fileHash);
    // Make sure the chunk directory exists
    if (!fs.existsSync(chunkDir)) {
      fs.mkdirSync(chunkDir, { recursive: true });
    }
    cb(null, chunkDir);
  },
  filename: (req, file, cb) => {
    const { chunkIndex } = req.query;
    cb(null, `chunk-${chunkIndex}`);
  },
});

const upload = multer({ storage });

// Receive an uploaded chunk (multer writes it to disk via the storage above)
app.post("/upload", upload.single("chunk"), (req, res) => {
  res.status(200).send("Slice uploaded successfully");
});


// Merge the slices
("/merge", (req, res) => {
  const { fileName, fileHash, totalChunks } = ;
  ("fileName",)
  const fileChunkDir = (uploadDir, fileHash);; { const filePath = (uploadDir, fileHash)
  const filePath = (uploadDir, fileName);

  // Create a writable stream for the final file merge
  const writeStream = (filePath);

  for (let i = 0; i < totalChunks; i++) {
    const chunkPath = (fileChunkDir, `chunk-${i}`); {
    const data = (chunkPath); // Read the slice
    (data); // write to final file
    // (chunkPath); // delete the chunked file - stay, you can see the upload history
  }

  (); // Close the stream
// (fileChunkDir); // Delete the chunked directory - stay, you can see the upload logs }
  ("File merge complete"); // Close the stream.
}).

app.listen(5000, () => {
  console.log("Server is up: http://localhost:5000");
});
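To sanity-check the three endpoints without the React front end, a small Node script can drive the same check/upload/merge protocol. This is a sketch, assuming Node 18+ (for the global fetch, FormData, and Blob); test.bin and demo-hash are placeholders, and the hash value is arbitrary here because the demo server only uses it as a directory name:

// smoke-test.js - drives /check, /upload and /merge against the server above
const fs = require("fs");

const CHUNK_SIZE = 5 * 1024 * 1024;

async function main() {
  const fileName = "test.bin";  // hypothetical local test file
  const fileHash = "demo-hash"; // any stable identifier works for this demo server
  const data = fs.readFileSync(fileName);
  const totalChunks = Math.ceil(data.length / CHUNK_SIZE);

  // 1. Ask which chunks the server already has
  const check = await fetch("http://localhost:5000/check", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ fileName, fileHash }),
  });
  const uploaded = await check.json();
  console.log("already uploaded:", uploaded);

  // 2. Upload only the missing chunks
  for (let i = 0; i < totalChunks; i++) {
    if (uploaded.includes(i)) continue;
    const chunk = data.subarray(i * CHUNK_SIZE, (i + 1) * CHUNK_SIZE);
    const form = new FormData();
    form.append("chunk", new Blob([chunk]), `chunk-${i}`);
    await fetch(
      `http://localhost:5000/upload?fileHash=${fileHash}&chunkIndex=${i}&fileName=${fileName}`,
      { method: "POST", body: form }
    );
  }

  // 3. Ask the server to merge the chunks into uploads/test.bin
  await fetch("http://localhost:5000/merge", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ fileName, fileHash, totalChunks }),
  });
  console.log("merge requested");
}

main();

Killing the script mid-run and starting it again demonstrates the resume behavior: the second run skips every chunk index reported by /check.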

Summary

With the code above, you have working chunked upload and breakpoint resume for large files: an interrupted transfer resumes from the last uploaded chunk instead of starting over, which makes uploads far more reliable on unstable networks. I hope this article is helpful to you!