[英]AWS SDK file upload to S3 via Node/Express using stream PassThrough - file is always corrupt
这很简单。 使用此代码,任何上传的图像文件都已损坏且无法打开。 PDF 看起来不错,但我注意到它正在将值注入基于文本的文件中。 这是 s3 中正确的文件大小,而不是像出了问题那样为零。 我不确定这是否是 Express、SDK 或两者兼而有之的问题? 是 Postman 吗? 我在今年 3 月的一个工作项目中构建了类似的东西,它完美无缺。 我不再有权访问该代码进行比较。
没有错误,没有任何问题的迹象。
// Third-party dependencies: AWS SDK v2 (S3 client), Node's stream module, Express.
const aws = require("aws-sdk");
const stream = require("stream");
const express = require("express");
const router = express.Router();
// NOTE(review): hardcoded credentials are placeholders for the example only —
// in real code load them from environment variables or an IAM role.
const AWS_ACCESS_KEY_ID = "XXXXXXXXXXXXXXXXXXXX";
const AWS_SECRET_ACCESS_KEY = "superSecretAccessKey";
const BUCKET_NAME = "my-bucket";
const BUCKET_REGION = "us-east-1";
// S3 client configured with region + explicit credentials; all other SDK
// settings are defaults.
const s3 = new aws.S3({
region: BUCKET_REGION,
accessKeyId: AWS_ACCESS_KEY_ID,
secretAccessKey: AWS_SECRET_ACCESS_KEY
});
/**
 * Starts a streaming S3 upload under `key` and hands back both ends:
 * the writable pass-through the caller feeds bytes into, and a promise
 * that settles when S3 has consumed the whole stream.
 */
const uploadStream = key => {
  // Writable side handed to the caller; S3 reads from it as data arrives.
  const passThrough = new stream.PassThrough();

  // Kick the upload off immediately — it consumes `passThrough` lazily.
  const upload = s3.upload(
    {
      Bucket: BUCKET_NAME,
      Key: key,
      Body: passThrough
    },
    (err, data) => {
      if (err) {
        console.error("ERROR: uploadStream:", err);
      } else {
        console.log("INFO: uploadStream:", data);
      }
    }
  );

  return {
    streamPass: passThrough,
    streamPromise: upload.promise()
  };
};
/**
 * POST /upload?file_name=<key> — pipes the raw request body into S3.
 *
 * NOTE(review): `req` here is the *raw* HTTP body. If the client sends
 * multipart/form-data (e.g. Postman's form-data mode), the multipart
 * boundary lines and part headers are uploaded verbatim, which is exactly
 * the "corruption" described above. Parse the body (multer/busboy) and
 * pipe only the file part's stream instead.
 */
router.post("/upload", async (req, res) => {
  try {
    const key = req.query.file_name;
    const { streamPass, streamPromise } = uploadStream(key);
    req.pipe(streamPass);
    await streamPromise;
    res.status(200).send({ result: "Success!" });
  } catch (e) {
    // FIX: the original swallowed the error silently, making failures
    // impossible to diagnose ("no errors, no sign of trouble").
    console.error("ERROR: /upload:", e);
    res.status(500).send({ result: "Fail!" });
  }
});
module.exports = router;
这是我的 package.json:
{
"name": "expresss3streampass",
"version": "0.0.0",
"private": true,
"scripts": {
"start": "node ./bin/www"
},
"dependencies": {
"aws-sdk": "^2.812.0",
"cookie-parser": "~1.4.4",
"debug": "~2.6.9",
"express": "~4.16.1",
"morgan": "~1.9.1"
}
}
更新:
经过进一步测试,我注意到纯文本文件正在被 Postman 更改。 例如,这个源文件:
{
"question_id": null,
"position_type_id": 1,
"question_category_id": 1,
"position_level_id": 1,
"question": "Do you test your code before calling it \"done\"?",
"answer": "Candidate should respond that they at least happy path test every feature and bug fix they write.",
"active": 1
}
...它落入桶中后看起来像这样:
----------------------------472518836063077482836177
Content-Disposition: form-data; name="file"; filename="question.json"
Content-Type: application/json
{
"question_id": null,
"position_type_id": 1,
"question_category_id": 1,
"position_level_id": 1,
"question": "Do you test your code before calling it \"done\"?",
"answer": "Candidate should respond that they at least happy path test every feature and bug fix they write.",
"active": 1
}
----------------------------472518836063077482836177--
我不得不认为这是问题所在。 Postman 是这个等式中唯一改变的东西,从这段代码第一次为我工作起。 我的请求标头如下所示:
我是最初添加“application/x-www-form-urlencoded”header 的人。 如果我现在使用它,我最终会在存储桶中得到一个 0 字节的文件。
Multer 是正确的解决方案。
它提供了几种不同的模式,但据我所知,您必须编写自定义存储处理程序才能访问底层 Stream,否则它将缓冲 memory 中的所有数据,并且只有在完成后才回调。
如果您在路由处理程序中检查req.file
,Multer 通常会在buffer
字段下提供一个 Buffer ,但它不再存在,因为我没有在回调中传递任何东西,所以我有理由相信这是按预期进行的流式传输.
下面是一个可行的解决方案。
注意: parse.single('image')
被传递到路由处理程序中。 这是指我使用的多部分字段名称。
// Dependencies: AWS SDK v2, Node streams, Express, and Multer for
// multipart/form-data parsing.
const aws = require('aws-sdk');
const stream = require('stream');
const express = require('express');
const router = express.Router();
const multer = require('multer')
// NOTE(review): placeholder credentials — use env vars or an IAM role in
// real code.
const AWS_ACCESS_KEY_ID = "XXXXXXXXXXXXXXXXXXXX";
const AWS_SECRET_ACCESS_KEY = "superSecretAccessKey";
const BUCKET_NAME = "my-bucket";
const BUCKET_REGION = "us-east-1";
const s3 = new aws.S3({
region: BUCKET_REGION,
accessKeyId: AWS_ACCESS_KEY_ID,
secretAccessKey: AWS_SECRET_ACCESS_KEY
});
// Starts a streaming S3 upload and returns both the writable pass-through
// (write the file bytes here) and a promise for the upload's completion.
const uploadStream = key => {
let streamPass = new stream.PassThrough();
let params = {
Bucket: BUCKET_NAME,
Key: key,
Body: streamPass // S3 consumes this stream as data is written into it
};
let streamPromise = s3.upload(params, (err, data) => {
if (err) {
console.error('ERROR: uploadStream:', err);
} else {
console.log('INFO: uploadStream:', data);
}
}).promise();
return {
streamPass: streamPass,
streamPromise: streamPromise
};
};
/**
 * Minimal Multer storage engine that streams the incoming file part
 * straight to S3 instead of buffering it in memory or on disk.
 */
class CustomStorage {
  // Multer storage-engine hook: invoked once per file part.
  _handleFile(req, file, cb) {
    const key = req.query.file_name;
    const { streamPass, streamPromise } = uploadStream(key);
    file.stream.pipe(streamPass);
    streamPromise
      .then(() => cb(null, {}))
      // FIX: the original never surfaced upload failures, leaving the
      // request hanging and the rejection unhandled.
      .catch(cb);
  }

  // Multer storage-engine hook: cleanup when a request is aborted/fails.
  // Nothing local to delete here — the S3 upload either finished or failed.
  _removeFile(req, file, cb) {
    cb(null);
  }
}
// Wire the custom storage engine into Multer (no disk/memory buffering).
const storage = new CustomStorage();
const parse = multer({storage});
// 'image' is the multipart field name the client must use for the file.
router.post('/upload', parse.single('image'), async (req, res) => {
try {
// By the time this handler runs, CustomStorage has already streamed the
// file to S3 (Multer executes the storage engine first).
res.status(200).send({ result: 'Success!' });
} catch (e) {
console.log(e)
res.status(500).send({ result: 'Fail!' });
}
});
module.exports = router;
我上面提供的基于 Multer 的解决方案有些取巧。 所以我深入研究了它的内部实现,看看它是如何工作的。 此解决方案仅使用 Busboy 来解析并流式传输文件。 Multer 实际上只是 Busboy 的一个包装器,附加了一些磁盘 I/O 的便利功能。
// Dependencies: AWS SDK v2 and Busboy (Multer's underlying multipart parser).
const aws = require('aws-sdk');
const express = require('express');
const Busboy = require('busboy');
const router = express.Router();
// NOTE(review): placeholder credentials — use env vars or an IAM role in
// real code.
const AWS_ACCESS_KEY_ID = "XXXXXXXXXXXXXXXXXXXX";
const AWS_SECRET_ACCESS_KEY = "superSecretAccessKey";
const BUCKET_NAME = "my-bucket";
const BUCKET_REGION = "us-east-1";
const s3 = new aws.S3({
region: BUCKET_REGION,
accessKeyId: AWS_ACCESS_KEY_ID,
secretAccessKey: AWS_SECRET_ACCESS_KEY
});
/**
 * Parses a multipart/form-data request with Busboy and streams the file
 * part directly to S3, keyed by the part's filename.
 *
 * @param {import('express').Request} request - incoming multipart request
 * @returns {Promise<void>} resolves when the S3 upload completes
 */
function multipart(request) {
  // FIX: no `async` executor — an exception thrown inside an async executor
  // is swallowed instead of rejecting the returned promise.
  return new Promise((resolve, reject) => {
    const busboy = new Busboy({ headers: request.headers });
    // you may need to add cleanup logic using 'busboy.on' events
    busboy.on('error', err => reject(err));
    busboy.on('file', function (fieldName, fileStream, fileName, encoding, mimeType) {
      const params = {
        Bucket: BUCKET_NAME,
        Key: fileName,
        Body: fileStream
      };
      // FIX: reject on upload failure — the original ignored the rejected
      // promise, hanging the request with an unhandled rejection.
      s3.upload(params).promise().then(resolve, reject);
    });
    request.pipe(busboy);
  });
}
// POST /upload — delegate multipart parsing + S3 streaming to multipart(),
// then report success/failure to the client.
router.post('/upload', (req, res) => {
  multipart(req)
    .then(() => res.status(200).send({ result: 'Success!' }))
    .catch((e) => {
      console.log(e);
      res.status(500).send({ result: 'Fail!' });
    });
});
module.exports = router;
据我所知,Postman 的行为是符合预期的——“文本注入”实际上是 web 标准,用于在上传时识别/划分文件。 请参阅此MDN Web 文档以及此文档以了解原因。
无论文件类型如何,它实际上都在注入该部分:
let streamPass = new stream.PassThrough();
// Tap the pass-through so we can inspect exactly what gets uploaded.
const chunks = [];
// FIX: `body` was assigned without a declaration — an implicit global,
// which is a ReferenceError in strict mode / ES modules.
let body;
streamPass.on('data', (chunk) => chunks.push(chunk));
streamPass.on("end", () => {
  body = Buffer.concat(chunks).toString();
  console.log(chunks, chunks.length)
  console.log("finished", body); // <-- see it here
});
我尝试了几种方法来控制/更改它,但没有一个简单的方法 - 从 Postman 端,我不认为这是一个可以更改的设置,从 NodeJS 端......我的意思是它是可能的,但解决方案很可能很笨重/复杂,我怀疑你不想要。 (虽然我可能是错的......)
鉴于上述情况,我将与@relief.melone 一起推荐multer
作为一个简单的解决方案。
如果您想将multer
与streams
一起使用,请尝试以下操作:(我已经指出我在哪里对您的代码进行了更改):
// const uploadStream = (key) => {
const uploadStream = (key, mime_type) => { // <- adding the mimetype
  // Writable side handed back to the caller; S3 drains it as data arrives.
  const passThrough = new stream.PassThrough();

  const upload = s3.upload(
    {
      Bucket: BUCKET_NAME,
      Key: key,
      Body: passThrough,
      ACL: 'public-read', // <- you can remove this
      ContentType: mime_type // <- adding the mimetype
    },
    (err, data) => {
      if (err) {
        console.error("ERROR: uploadStream:", err);
      } else {
        console.log("INFO: uploadStream:", data);
      }
    }
  );

  // Same shape as before: writable stream + completion promise.
  return {
    streamPass: passThrough,
    streamPromise: upload.promise()
  };
};
// router.post("/upload", async (req, res) => {
router.post("/upload", multer().single('file'), async (req, res) => { // <- we're adding multer
  try {
    const key = req.query.file_name;
    // Multer (memory storage) has already buffered the whole file as
    // req.file.buffer and parsed its mimetype.
    const { streamPass, streamPromise } = uploadStream(key, req.file.mimetype);
    // FIX: write the buffer straight into the upload stream — the extra
    // intermediate PassThrough the original created added nothing.
    streamPass.end(req.file.buffer);
    await streamPromise;
    res.status(200).send({ result: "Success!" });
  } catch (e) {
    console.log(e)
    res.status(500).send({ result: "Fail!" });
  }
});
声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.