AWS Lambda function getting Access Denied when calling getObject from S3

Submitted anonymously (unverified) on 2019-12-03 01:25:01

Question:

I am getting an Access Denied error from the S3 AWS service in my Lambda function.

This is the code:

// dependencies
var async = require('async');
var AWS = require('aws-sdk');
var gm = require('gm').subClass({ imageMagick: true }); // Enable ImageMagick integration.

exports.handler = function (event, context) {
    var srcBucket = event.Records[0].s3.bucket.name;
    // Object key may have spaces or unicode non-ASCII characters.
    var key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));
    /* Expected key structure:
    {
        originalFilename: ,
        versions: [
            {
                size: ,
                crop: [x, y],
                max: [x, y],
                rotate:
            }
        ]
    } */
    var fileInfo;
    var dstBucket = "xmovo.transformedimages.develop";
    try {
        // TODO: Decompress and decode the returned value
        fileInfo = JSON.parse(key);

        // get reference to S3 client
        var s3 = new AWS.S3();

        // Download the image from S3 into a buffer.
        s3.getObject({
            Bucket: srcBucket,
            Key: key
        },
        function (err, response) {
            if (err) {
                console.log("Error getting from s3: >>> " + err + "::: Bucket-Key >>>" + srcBucket + "-" + key + ":::Principal>>>" + event.Records[0].userIdentity.principalId, err.stack);
                return;
            }

            // Infer the image type.
            var img = gm(response.Body);
            var imageType = null;
            img.identify(function (err, data) {
                if (err) {
                    console.log("Error image type: >>> " + err);
                    deleteFromS3(srcBucket, key);
                    return;
                }
                imageType = data.format;

                // For each of the versions requested, apply the transform and upload.
                async.each(fileInfo.versions, function (currentVersion, callback) {
                    async.waterfall([async.apply(transform, response, currentVersion), uploadToS3, callback]);
                }, function (err) {
                    if (err) console.log("Error on execution of waterfall: >>> " + err);
                    else {
                        // When all versions are done, delete the original image from srcBucket.
                        deleteFromS3(srcBucket, key);
                    }
                });
            });
        });
    }
    catch (ex) {
        context.fail("exception through: " + ex);
        deleteFromS3(srcBucket, key);
        return;
    }

    function transform(response, version, callback) {
        var imageProcess = gm(response.Body);
        if (version.rotate != 0) imageProcess = imageProcess.rotate("black", version.rotate);
        if (version.size != null) {
            if (version.crop != null) {
                // Crop the image from the given coordinates.
                imageProcess = imageProcess.crop(version.size[0], version.size[1], version.crop[0], version.crop[1]);
            }
            else {
                // Find the bigger dimension and resize the other one proportionally.
                var widthIsMax = version.size[0] > version.size[1];
                var maxValue = Math.max(version.size[0], version.size[1]);
                imageProcess = (widthIsMax) ? imageProcess.resize(maxValue) : imageProcess.resize(null, maxValue);
            }
        }
        // Finally convert the image to JPEG at 90% quality.
        imageProcess.toBuffer("jpg", { quality: 90 }, function (err, buffer) {
            if (err) return callback(err); // return added so the callback is not invoked twice
            callback(null, version, "image/jpeg", buffer);
        });
    }

    function deleteFromS3(bucket, filename) {
        // A callback is required for the request to actually be sent (aws-sdk v2).
        s3.deleteObject({
            Bucket: bucket,
            Key: filename
        }, function (err) {
            if (err) console.log("Error deleting from s3: >>> " + err);
        });
    }

    function uploadToS3(version, contentType, data, callback) {
        // Stream the transformed image to a different S3 bucket.
        var dstKey = fileInfo.originalFilename + "_" + version.size + ".jpg";
        s3.putObject({
            Bucket: dstBucket,
            Key: dstKey,
            Body: data,
            ContentType: contentType
        }, callback);
    }
};

This is the error in CloudWatch:

AccessDenied: Access Denied 

This is the stack trace:

at Request.extractError (/var/runtime/node_modules/aws-sdk/lib/services/s3.js:329:35)
at Request.callListeners (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:105:20)
at Request.emit (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:77:10)
at Request.emit (/var/runtime/node_modules/aws-sdk/lib/request.js:596:14)
at Request.transition (/var/runtime/node_modules/aws-sdk/lib/request.js:21:10)
at AcceptorStateMachine.runTo (/var/runtime/node_modules/aws-sdk/lib/state_machine.js:14:12)
at /var/runtime/node_modules/aws-sdk/lib/state_machine.js:26:10
at Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:37:9)
at Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:598:12)
at Request.callListeners (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:115:18)

There is no other description or info. On the S3 bucket, the permissions allow everyone to put, list, and delete.

What can I do to access the S3 bucket?

PS: In the Lambda event properties, the principal is correct and has administrative privileges.

Answer 1:

Your Lambda does not have the required privilege (s3:GetObject).

Go to the IAM dashboard and check the role associated with your Lambda's execution. If you used the AWS wizard, it automatically creates a role called oneClick_lambda_s3_exec_role. Click Show Policy. It should show something similar to the sketch below. Make sure s3:GetObject is listed.
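As a rough sketch (the original answer showed a screenshot; BUCKET-NAME here is a placeholder), a minimal execution role policy granting that read access could look like this. Note that the question's code also calls putObject and deleteObject, which would need their own grants:

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::BUCKET-NAME/*"
        }
    ]
}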



Answer 2:

I ran into this issue and after hours of IAM policy madness, the solution was to:

  1. Go to S3 console
  2. Click bucket you are interested in.
  3. Click 'Properties'
  4. Unfold 'Permissions'
  5. Click 'Add more permissions'
  6. Choose 'Any Authenticated AWS User' from the dropdown. Select 'Upload/Delete' and 'List' (or whatever you need for your Lambda).
  7. Click 'Save'

Done. Your carefully written IAM role policies don't matter, and neither do specific bucket policies (I've written those too, to make it work). Or they just don't work on my account, who knows.

[EDIT]

After a lot of tinkering, I've concluded the above approach is not the best. Try this instead:

  1. Keep your role policy as in the helloV post.
  2. Go to S3. Select your bucket. Click Permissions. Click Bucket Policy.
  3. Try something like this:
{
    "Version": "2012-10-17",
    "Id": "Lambda access bucket policy",
    "Statement": [
        {
            "Sid": "All on objects in bucket lambda",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::AWSACCOUNTID:root"
            },
            "Action": "s3:*",
            "Resource": "arn:aws:s3:::BUCKET-NAME/*"
        },
        {
            "Sid": "All on bucket by lambda",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::AWSACCOUNTID:root"
            },
            "Action": "s3:*",
            "Resource": "arn:aws:s3:::BUCKET-NAME"
        }
    ]
}

This worked for me and does not require you to share the bucket with all authenticated AWS users (which most of the time is not ideal).
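If you prefer the command line, a policy like the one above can be applied with the AWS CLI (assuming it is saved locally as policy.json; the file name is just an example):

aws s3api put-bucket-policy --bucket BUCKET-NAME --policy file://policy.json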



Answer 3:

Interestingly enough, AWS returns 403 (Access Denied) when the file does not exist. This is because, without s3:ListBucket permission on the bucket, S3 answers a request for a missing key with 403 rather than 404, so as not to reveal whether the object exists. Make sure the file is actually in the bucket.
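To tell a missing object apart from a real permission problem, you can probe the object first. A minimal sketch using the same aws-sdk v2 client as in the question (srcBucket and key as defined there); the 404 case is only visible if the role also has s3:ListBucket:

var AWS = require('aws-sdk');
var s3 = new AWS.S3();

s3.headObject({ Bucket: srcBucket, Key: key }, function (err, data) {
    if (err && err.statusCode === 404) {
        // Object genuinely does not exist (seeing 404 requires s3:ListBucket).
        console.log("Object not found: " + key);
    } else if (err && err.statusCode === 403) {
        // Either access is denied, or the object is missing and the role lacks s3:ListBucket.
        console.log("Access denied for: " + key);
    } else if (err) {
        console.log("Unexpected error: " + err);
    } else {
        console.log("Object exists, " + data.ContentLength + " bytes");
    }
});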



Answer 4:

I too ran into this issue. I fixed it by granting s3:GetObject* in the role policy, since the code was attempting to obtain a version of the object (which requires s3:GetObjectVersion, not just s3:GetObject).
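In policy JSON, that looks something like the following sketch; the trailing wildcard makes the statement cover s3:GetObject, s3:GetObjectVersion, and the other object-read variants:

{
    "Effect": "Allow",
    "Action": "s3:GetObject*",
    "Resource": "arn:aws:s3:::BUCKET-NAME/*"
}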



Answer 5:

If you are specifying the Resource, don't forget to add the sub-folder (object-level) specification as well, like this:

"Resource": [   "arn:aws:s3:::BUCKET-NAME",   "arn:aws:s3:::BUCKET-NAME/*" ] 


Answer 6:

This may be helpful to some people.

If you are still getting an Access Denied error even after configuring a proper IAM role with S3 permission policies, make sure your code is correct. I had a bug in my code, and it showed up in the logs as Access Denied.
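One way to catch this class of bug is to log the exact parameters right before the S3 call, so a malformed bucket or key is visible in CloudWatch. A minimal sketch, reusing srcBucket and key from the question:

// Log exactly what is being requested; a wrong key often surfaces as Access Denied.
console.log("getObject params: Bucket=" + srcBucket + ", Key=" + key);
s3.getObject({ Bucket: srcBucket, Key: key }, function (err, response) {
    if (err) {
        console.log("getObject failed: " + err.code + " (" + err.statusCode + ")");
        return;
    }
    // ... process response.Body ...
});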



Answer 7:

I was struggling with this issue for hours. I was using AmazonS3EncryptionClient and nothing I did helped. Then I noticed that the client is actually deprecated, so I thought I'd try switching to the builder model they have:

var builder = AmazonS3EncryptionClientBuilder.standard()
  .withEncryptionMaterials(new StaticEncryptionMaterialsProvider(encryptionMaterials))
if (accessKey.nonEmpty && secretKey.nonEmpty)
  builder = builder.withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey.get, secretKey.get)))
builder.build()

And... that solved it. Looks like Lambda has trouble injecting the credentials in the old model, but works well in the new one.


