我不能上传像视频这样的大文件到s3。它最终会超时。我曾尝试使用fs来流式传输它,但我肯定没有正确使用它。
我已经尝试了我能想到的所有方法来让fs流式传输这个文件。我不知道是否可以在单独的上传路径中使用fs,就像我在multerS3中使用的那样。我可以上传图片和非常小的视频,但仅此而已。
// Here is my s3 index file which exports upload
const crypto = require('crypto');
const aws = require('aws-sdk');
const multerS3 = require('multer-s3');
const fs = require('fs');
aws.config.update({
secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
accessKeyId: process.env.AWS_ACCESS_KEY_ID,
region: 'us-east-1',
ACL: 'public-read'
});
const s3 = new aws.S3({ httpOptions: { timeout: 10 * 60 * 1000 }});
var options = { partSize: 5 * 1024 * 1024, queueSize: 10 };
const fileFilter = (req, file, cb) => {
console.log('file.mimetype is ', file.mimetype);
if (file.mimetype === 'image/jpeg' || file.mimetype === 'image/png' || file.mimetype === 'video/mp4' || file.mimetype === 'video/avi' || file.mimetype === 'video/mov' || file.mimetype === 'video/quicktime') {
cb(null, true);
} else {
cb(new Error('Invalid file type'), false);
}
}
const filename = getFileName();
const upload = multer({
fileFilter,
storage: multerS3({
acl: 'public-read',
s3,
options,
body: fs.createReadStream(filename),
bucket: 'skilljack',
metadata: function (req, file, cb) {
cb(null, {fieldName: 'TESTING_METADATA'})
},
key: function (req, file, cb) {
let buf = crypto.randomBytes(16);
buf = buf.toString('hex');
let uniqFileName = file.originalname.replace(/\.jpeg|\.jpg|\.png|\.avi|\.mov|\.mp4/ig, '');
uniqFileName += buf;
cb(undefined, uniqFileName );
}
})
});
function getFileName (req, file) {
if (file) {
const body = fs.createReadStream(file.originalname);
return body;
}
}
module.exports = {
upload
}
// Here is my route file
const express = require('express');
const router = express.Router({ mergeParams: true });
const multer = require('multer');
const { upload } = require('../s3');
const { asyncErrorHandler, isLoggedIn, isAuthor } = require('../middleware');
const {
postCreate,
postDestroy
} = require('../controllers/posts');
router.post('/', isLoggedIn, asyncErrorHandler(isAuthor), upload.single('image'), asyncErrorHandler(postCreate));
router.delete('/:post_id', isLoggedIn, asyncErrorHandler(isAuthor), asyncErrorHandler(postDestroy));
module.exports = router;发布于 2020-09-17 22:18:16
除了setting the queueSize to 1之外,您可能还想禁用超时。
const s3 = new aws.S3({
accessKeyId: config.get('accessKeyId'),
secretAccessKey: config.get('secretAccessKey'),
Bucket: config.get('bucket'),
});
s3.config.httpOptions.timeout = 0发布于 2020-07-23 17:46:03
几天来都有同样的问题。避免这种情况的一种方法(但需要很长时间)是将queueSize减少到1。
当您网络不是很好时,这种情况就会发生,最终,一些队列将保持空闲并处理超时。
QueueSize设置为1将允许为正在上传的部分分配更多带宽,从而避免超时。
https://stackoverflow.com/questions/56553056
复制相似问题