diff --git a/lib/sqs/README.md b/lib/sqs/README.md
index 89a010a..0fce201 100644
--- a/lib/sqs/README.md
+++ b/lib/sqs/README.md
@@ -11,12 +11,37 @@ const sqs = require('@sparkpost/aws').SQS({
 });
 ```
 
+Localstack configuration example:
+
+```js
+const sqs = require('@sparkpost/aws').SQS({
+  account: '000000000000',
+  bypassProxy: false,
+  sqsEndpoint: 'localhost:4566',
+  sqsProtocol: 'http',
+  s3Endpoint: 'localhost:4566',
+  s3Protocol: 'http',
+  s3ForcePathStyle: true
+});
+```
+
 `account` is required and is the AWS account ID.
 
 `queuePrefix` and `queueSuffix` are optional and default to the empty string. They are used in constructing the SQS queue URL.
 
 `defaultVisibilityTimeout` is optional and defaults to 300s. It sets the VisibilityTimeout value when retrieving messages from a queue.
 
+`sqsEndpoint` is optional and defaults to null. It sets the SQS endpoint used in all SQS requests.
+
+`sqsProtocol` is optional and defaults to https. It sets the protocol used
+in SQS URLs.
+
+`s3Endpoint` is optional and defaults to null. It sets the S3 endpoint used in all S3 requests.
+
+`s3Protocol` is optional and defaults to https. It sets the protocol used
+in S3 URLs.
+
+`s3ForcePathStyle` is optional and defaults to null. It sets whether to force path-style URLs for S3 objects. It's useful when running Localstack.
 
 ## getQueueURL
 
diff --git a/lib/sqs/index.js b/lib/sqs/index.js
index 4738cb8..7756f2f 100644
--- a/lib/sqs/index.js
+++ b/lib/sqs/index.js
@@ -18,7 +18,7 @@ const MAX_MSG_SIZE = 262085;
  * Max size of message that does not need to be compressed as it can be sent via 1 SQS Operation
  * 64kb => b: (64kb * 1024 bytes = 65536 bytes - 50 bytes = 65486)
  * -50 bytes to compensate for size increase after compressing with no compression level
-*/
+ */
 const MAX_COMPRESS_MSG_SIZE = 65486;
 
 /**
@@ -36,22 +36,54 @@ module.exports = ({
   defaultVisibilityTimeout = 300,
   bypassProxy = true,
   longPollingWaitTime = 20,
-  sqsEndpoint = null
+  sqsEndpoint = null,
+  sqsProtocol = 'https',
+  s3Endpoint = null,
+  s3Protocol = 'https',
+  s3ForcePathStyle = null
 }) => {
-  let ep = {};
+  let endpointForSQS = {}
+    ,endpointForS3 = {};
+
   if (sqsEndpoint) {
-    ep = { endpoint: new AWS.Endpoint(sqsEndpoint) };
+    endpointForSQS = {
+      endpoint: new AWS.Endpoint(`${sqsProtocol}://${sqsEndpoint}`)
+    };
+  }
+
+  if (s3Endpoint) {
+    endpointForS3 = {
+      endpoint: new AWS.Endpoint(`${s3Protocol}://${s3Endpoint}`)
+    };
   }
-  const SQS = setHttpAgent({ ...clientConfig, ...ep }, bypassProxy, AWS.SQS);
-  const S3 = setHttpAgent(clientConfig, bypassProxy, AWS.S3);
+
+  const SQS = setHttpAgent(
+    {
+      ...clientConfig,
+      ...endpointForSQS
+    },
+    bypassProxy,
+    AWS.SQS
+  );
+
+  const S3 = setHttpAgent(
+    {
+      ...clientConfig,
+      ...endpointForS3,
+      ...(s3ForcePathStyle ? { s3ForcePathStyle } : {})
+    },
+    bypassProxy,
+    AWS.S3
+  );
 
   promisify(SQS);
   promisify(S3);
 
   function getQueueURL(name) {
     const sqsPrefix = sqsEndpoint
-      ? `https://${sqsEndpoint}`
-      : `https://sqs.${AWS.config.region}.amazonaws.com`;
+      ? `${sqsProtocol}://${sqsEndpoint}`
+      : `${sqsProtocol}://sqs.${AWS.config.region}.amazonaws.com`;
+
     return `${sqsPrefix}/${account}/${queuePrefix}${name}${queueSuffix}`;
   }
 
diff --git a/package-lock.json b/package-lock.json
index 42a4dbe..e4f5c71 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -532,8 +532,7 @@
     "dependencies": {
       "ansi-regex": {
         "version": "5.0.0",
-        "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz",
-        "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==",
+        "resolved": "",
         "dev": true
       },
       "ansi-styles": {
@@ -2232,8 +2231,7 @@
     },
     "ansi-regex": {
       "version": "5.0.0",
-      "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz",
-      "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==",
+      "resolved": "",
       "dev": true
     },
     "ansi-styles": {
@@ -3886,9 +3884,9 @@
     },
     "dependencies": {
       "ansi-regex": {
-        "version": "5.0.0",
-        "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz",
-        "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==",
+        "version": "5.0.1",
+        "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+        "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
         "dev": true
       },
       "ansi-styles": {
diff --git a/test/unit/lib/sqs.spec.js b/test/unit/lib/sqs.spec.js
index cb62371..81220a2 100644
--- a/test/unit/lib/sqs.spec.js
+++ b/test/unit/lib/sqs.spec.js
@@ -21,7 +21,8 @@ describe('SQS Utilities', function() {
     ,testConfig
     ,s3Config
     ,awsMock
-    ,queueUrl;
+    ,queueUrl
+    ,endpointUrls;
 
   beforeEach(function() {
     result = 'result';
@@ -50,6 +51,8 @@ describe('SQS Utilities', function() {
       Body: compress(JSON.stringify({ expect: 'a passing test!' }))
     };
 
+    endpointUrls = [];
+
     awsMock = {
       config: {
         region: 'Winterfel'
@@ -66,7 +69,8 @@ describe('SQS Utilities', function() {
         }
       },
       Endpoint: class {
-        constructor() {
+        constructor(url) {
+          endpointUrls.push(url);
           return 'this is the endpoint';
         }
       }
@@ -82,10 +86,11 @@ describe('SQS Utilities', function() {
     sqsInstance = sqs(testConfig);
   });
 
-  it('should return a queue name', function() {
+  it('should return a queue name and not have endpoints set', function() {
     expect(sqsInstance.getQueueURL('webhooks')).to.equal(
       'https://sqs.Winterfel.amazonaws.com/Stark/etl_webhooks_ending'
     );
+    expect(endpointUrls).to.have.lengthOf(0);
   });
 
   it('should use the specified endpoint in queue names', function() {
@@ -100,6 +105,70 @@ describe('SQS Utilities', function() {
     expect(s3Config).to.not.have.keys(['endpoint']);
   });
 
+  it('should use the specified protocol and endpoint for sqs in queue names', function() {
+    const sqsEndpointInstance = sqs({
+      ...testConfig,
+      ...{
+        sqsProtocol: 'http',
+        sqsEndpoint: 'localhost'
+      }
+    });
+
+    expect(sqsEndpointInstance.getQueueURL('webhooks')).to.equal(
+      'http://localhost/Stark/etl_webhooks_ending'
+    );
+
+    expect(endpointUrls).to.have.lengthOf(1);
+    expect(endpointUrls[0]).to.equal('http://localhost');
+    // S3 should not be using the same endpoint, nor path style flag
+    expect(s3Config).to.not.have.keys(['endpoint', 's3ForcePathStyle']);
+  });
+
+  it('should use the specified protocol and endpoint for s3', function() {
+    const s3EndpointInstance = sqs({
+      ...testConfig,
+      ...{
+        s3Protocol: 'http',
+        s3Endpoint: '0.0.0.0'
+      }
+    });
+
+    // SQS Queue url is not changed due to s3 config
+    expect(s3EndpointInstance.getQueueURL('webhooks')).to.equal(
+      'https://sqs.Winterfel.amazonaws.com/Stark/etl_webhooks_ending'
+    );
+
+    expect(endpointUrls).to.have.lengthOf(1);
+    expect(endpointUrls[0]).to.equal('http://0.0.0.0');
+
+    expect(s3Config).to.have.property('endpoint');
+    expect(s3Config).to.not.have.property('s3ForcePathStyle');
+  });
+
+  it('should use endpoints for sqs and s3 if specified', function() {
+    const s3EndpointInstance = sqs({
+      ...testConfig,
+      ...{
+        sqsProtocol: 'http',
+        sqsEndpoint: 'localhost',
+        s3Protocol: 'http',
+        s3Endpoint: '0.0.0.0',
+        s3ForcePathStyle: true
+      }
+    });
+
+    expect(s3EndpointInstance.getQueueURL('webhooks')).to.equal(
+      'http://localhost/Stark/etl_webhooks_ending'
+    );
+
+    expect(endpointUrls).to.have.lengthOf(2);
+    expect(endpointUrls[0]).to.equal('http://localhost');
+    expect(endpointUrls[1]).to.equal('http://0.0.0.0');
+
+    expect(s3Config).to.have.property('endpoint');
+    expect(s3Config.s3ForcePathStyle).to.equals(true);
+  });
+
   it('should default prefix and suffix to the empty string name', function() {
     delete testConfig.queuePrefix;
     delete testConfig.queueSuffix;
@@ -737,7 +806,10 @@ describe('SQS Utilities', function() {
       expect(res).to.equal('result');
      expect(sqsMock.deleteMessageBatch.callCount).to.equal(1);
       expect(sqsMock.deleteMessageBatch.args[0][0]).to.deep.equal({
-        Entries: [{ Id: 1, foo: 'bar' }, { Id: 2, foo: 'bat' }],
+        Entries: [
+          { Id: 1, foo: 'bar' },
+          { Id: 2, foo: 'bat' }
+        ],
         QueueUrl: queueUrl
       });
     });
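
For reviewers, a standalone sketch (not part of the patch) of the queue URL resolution that the new `sqsEndpoint`/`sqsProtocol` options introduce, lifted from the `getQueueURL` change above. The `buildQueueURL` wrapper and its parameter list are illustrative only.

```js
// Illustrative only: mirrors the ternary added to getQueueURL in lib/sqs/index.js.
const buildQueueURL = ({ account, queuePrefix = '', queueSuffix = '', region, sqsEndpoint = null, sqsProtocol = 'https' }, name) => {
  const sqsPrefix = sqsEndpoint
    ? `${sqsProtocol}://${sqsEndpoint}`               // custom endpoint, e.g. Localstack
    : `${sqsProtocol}://sqs.${region}.amazonaws.com`; // default regional AWS endpoint
  return `${sqsPrefix}/${account}/${queuePrefix}${name}${queueSuffix}`;
};

// With a Localstack-style config the URL points at the local endpoint:
// buildQueueURL({ account: '000000000000', region: 'us-east-1', sqsEndpoint: 'localhost:4566', sqsProtocol: 'http' }, 'webhooks')
// => 'http://localhost:4566/000000000000/webhooks'
```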