The goal: Upload large files to AWS Glacier without holding the whole file in memory.
I'm currently uploading to Glacier using fs.readFileSync(), and things are working, but reading the whole file into memory won't scale to large files.
You may consider the snippet below, where the file is read in chunks of 1024 bytes:
const fs = require('fs');

let data = '';
// highWaterMark sets the chunk size: here 1 KiB per 'data' event
const readStream = fs.createReadStream('/tmp/foo.txt', { highWaterMark: 1 * 1024, encoding: 'utf8' });

readStream.on('data', function (chunk) {
  data += chunk; // demo only: this accumulates every chunk back into memory
  console.log('chunk Data :');
  console.log(chunk); // your per-chunk processing logic goes here
}).on('end', function () {
  console.log('###################');
  console.log(data);
  // all accumulated data is available once the file has been fully read
});
Please note: highWaterMark is the option that controls the chunk size. Hope this helps!
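One caveat: accumulating into data, as the snippet above does, rebuilds the entire file in memory, which defeats the stated goal. A minimal sketch that keeps memory bounded by handling one chunk at a time (readable streams are async-iterable in Node 10+); processChunk is a hypothetical placeholder for whatever per-chunk work you need, e.g. uploading that part:

const fs = require('fs');

async function processFile(path) {
  // Each iteration yields one highWaterMark-sized Buffer; nothing accumulates,
  // and awaiting inside the loop applies backpressure to the stream.
  const stream = fs.createReadStream(path, { highWaterMark: 1024 * 1024 }); // 1 MiB chunks
  for await (const chunk of stream) {
    await processChunk(chunk); // hypothetical: process or upload this chunk, then continue
  }
}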
Web references: https://stackabuse.com/read-files-with-node-js/ and "Changing readstream chunksize".
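Going further, for the Glacier goal specifically, here is a rough sketch of feeding those chunks into the multipart upload calls of the AWS SDK for JavaScript v2 (initiateMultipartUpload, uploadMultipartPart, completeMultipartUpload). Treat it as an outline under assumptions: the region and vault name are placeholders, Glacier requires the part size to be a power-of-two multiple of 1 MiB, and completeMultipartUpload needs a SHA-256 tree hash of the whole archive, shown here as a hypothetical computeTreeHash helper.

const AWS = require('aws-sdk');
const fs = require('fs');

const glacier = new AWS.Glacier({ region: 'us-east-1' }); // region is an assumption
const PART_SIZE = 1024 * 1024; // 1 MiB; Glacier parts must be a power-of-two multiple of 1 MiB

async function uploadToGlacier(path, vaultName) {
  const { uploadId } = await glacier.initiateMultipartUpload({
    accountId: '-', // '-' means the account of the current credentials
    vaultName,
    partSize: PART_SIZE.toString(),
  }).promise();

  // File streams emit exactly highWaterMark bytes per chunk (except the last),
  // which matches Glacier's requirement that all parts but the last be equal-sized.
  let position = 0;
  const stream = fs.createReadStream(path, { highWaterMark: PART_SIZE });
  for await (const chunk of stream) {
    await glacier.uploadMultipartPart({
      accountId: '-',
      vaultName,
      uploadId,
      range: `bytes ${position}-${position + chunk.length - 1}/*`,
      body: chunk, // the SDK computes the per-part tree-hash checksum automatically
    }).promise();
    position += chunk.length;
  }

  return glacier.completeMultipartUpload({
    accountId: '-',
    vaultName,
    uploadId,
    archiveSize: position.toString(),
    checksum: await computeTreeHash(path), // hypothetical: SHA-256 tree hash of the whole archive
  }).promise();
}

Only one PART_SIZE buffer is held at a time, so memory stays flat regardless of the archive size.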