我有一个接收文件并创建后台任务的端点,用于将这些文件上传到S3。
为了后台文件上传,我使用了议程(https://github.com/agenda/agenda)。唯一的限制是我需要以MongoDB支持的格式存储文件(这是Agenda在幕后使用的格式)。为此,我将文件转换为缓冲区,然后再将其发送到议程。
这是我的代码:
Mutation: {
batchCreateProgressPics: combineResolvers(
isAuthenticated,
async (parent, { pics }, { models, currentUser }) => {
return await Promise.all(
pics.map(async (pic, i) => {
const { file, bodyPart, localPath } = pic;
const { createReadStream } = await file;
const stream = createReadStream();
console.log("Setting up buffer...");
const buffer = await new Promise((resolve, reject) => {
var buffers = [];
stream.on("data", function(data) {
buffers.push(data);
});
stream.on("end", function() {
const everything = Buffer.concat(buffers);
resolve(everything);
});
stream.on("error", function(e) {
reject(e);
});
});
const progressPic = await models.ProgressPic.create({
bodyPart,
user: currentUser.id,
url: localPath,
});
console.log("Creating backgruond task...");
Agenda.now("uploadProgressPic", {
userId: currentUser.id,
progressPicId: progressPic.id,
filename: `${progressPic.id}-${bodyPart}.jpg`,
buffer,
});
console.log("Done.");
return progressPic;
})
);
}
),
},
这在我的本地开发服务器上速度很快,但是由于存在缓冲区,要花很长时间才能在生产中运行。
console.log(Setting up buffer...)
之后的行需要很长时间。我想做的是:
创建并返回一个progressPics数组,每个数组
pics
元素一个在发送响应之后,对缓冲区进行处理,以免缓冲前端。
这可能吗?
============更新==========
因此,如果我不遵守承诺,它将抱怨请求在缓冲区完成之前就断开了:
const uploadProgressPic = async ({ file, progressPicId, userId, bodyPart }) => {
try {
const { createReadStream } = await file;
const stream = createReadStream();
console.log("Setting up buffer...");
const buffer = await new Promise((resolve, reject) => {
var buffers = [];
stream.on("data", function(data) {
buffers.push(data);
});
stream.on("end", function() {
const everything = Buffer.concat(buffers);
resolve(everything);
});
stream.on("error", function(e) {
reject(e);
});
});
console.log("Done.");
console.log("Creating backgruond task...");
Agenda.now("uploadProgressPic", {
userId,
progressPicId,
filename: `${progressPicId}-${bodyPart}.jpg`,
buffer,
});
} catch (error) {
console.log("ERROR OCCURRED: ", error);
}
};
export default {
Mutation: {
batchCreateProgressPics: combineResolvers(
isAuthenticated,
async (parent, { pics }, { models, currentUser }) => {
return pics.map(async (pic, i) => {
const { file, bodyPart, localPath } = pic;
const progressPic = await models.ProgressPic.create({
bodyPart,
user: currentUser.id,
url: localPath,
});
uploadProgressPic({
file,
progressPicId: progressPic.id,
userId: currentUser.id,
bodyPart,
});
return progressPic;
});
}
),
},
};
错误:
ERROR OCCURRED: BadRequestError: Request disconnected during file upload stream parsing.
at IncomingMessage.<anonymous> (/Users/edmundmai/Documents/src/acne-tracker/server/node_modules/graphql-upload/lib/processRequest.js:300:35)
at Object.onceWrapper (events.js:291:20)
at IncomingMessage.emit (events.js:203:13)
at IncomingMessage.EventEmitter.emit (domain.js:471:20)
at resOnFinish (_http_server.js:614:7)
at ServerResponse.emit (events.js:208:15)
at ServerResponse.EventEmitter.emit (domain.js:471:20)
at onFinish (_http_outgoing.js:649:10)
at onCorkedFinish (_stream_writable.js:678:5)
at afterWrite (_stream_writable.js:483:3)
at processTicksAndRejections (internal/process/task_queues.js:77:11) {
message: 'Request disconnected during file upload stream parsing.',
expose: true,
statusCode: 499,
status: 499
}
==========更新2 =============
即使试图1)简化它,2)将
await
移到createReadStream()
之外,也会显示相同的错误:const uploadProgressPic = async ({
stream,
progressPicId,
userId,
bodyPart,
models,
}) => {
try {
console.log("Uploading to S3...");
const { Location: url, Key: key, Bucket: bucket } = await S3.upload({
stream,
folder: userId,
filename: `${progressPicId}-${bodyPart}.jpg`,
});
if (url && key && bucket) {
await models.ProgressPic.findOneAndUpdate(
{ _id: progressPicId },
{ $set: { url, key, bucket } },
{ new: true, useFindAndModify: false }
);
console.log("Done!");
}
} catch (error) {
console.log("ERROR OCCURRED: ", error);
}
};
export default {
Mutation: {
batchCreateProgressPics: combineResolvers(
isAuthenticated,
async (parent, { pics }, { models, currentUser }) => {
return pics.map(async (pic, i) => {
const { file, bodyPart, localPath } = pic;
const progressPic = await models.ProgressPic.create({
bodyPart,
user: currentUser.id,
url: localPath,
});
const { createReadStream } = await file;
const stream = createReadStream();
uploadProgressPic({
stream,
progressPicId: progressPic.id,
userId: currentUser.id,
bodyPart,
models,
});
return progressPic;
});
}
),
},
};
错误:
Uploading to S3...
Uploading to S3...
Uploading to S3...
ERROR OCCURRED: BadRequestError: Request disconnected during file upload stream parsing.
at IncomingMessage.<anonymous> (/Users/edmundmai/Documents/src/acne-tracker/server/node_modules/graphql-upload/lib/processRequest.js:300:35)
at Object.onceWrapper (events.js:291:20)
at IncomingMessage.emit (events.js:203:13)
at IncomingMessage.EventEmitter.emit (domain.js:471:20)
at resOnFinish (_http_server.js:614:7)
at ServerResponse.emit (events.js:208:15)
at ServerResponse.EventEmitter.emit (domain.js:471:20)
at onFinish (_http_outgoing.js:649:10)
at onCorkedFinish (_stream_writable.js:678:5)
at afterWrite (_stream_writable.js:483:3)
at processTicksAndRejections (internal/process/task_queues.js:77:11) {
message: 'Request disconnected during file upload stream parsing.',
expose: true,
statusCode: 499,
status: 499
}
Done!
有趣的是,即使抱怨,我仍然在日志中看到一些
uploadProgressPic
? 最佳答案
不是这个主题的专家,但是我有一个可行的想法和一个理论:
想法:如果您要处理的图像数量过多,则可能是由于Promise.all()等待而引起的。我建议您使用异步中的parallelLimit来限制一次执行的并行功能,否则会出现性能问题。
理论:也许您可以在每次使用Buffer
之后释放内存分配,以避免出现内存泄漏问题并使服务器性能更高。
如果我无论如何都错了,请纠正我。我本人对此问题的结果感兴趣。