diff --git a/packages/@aws-cdk/core/lib/cloudformation/arn.ts b/packages/@aws-cdk/core/lib/cloudformation/arn.ts index 0f8687813efb4..1a2e8a8e93869 100644 --- a/packages/@aws-cdk/core/lib/cloudformation/arn.ts +++ b/packages/@aws-cdk/core/lib/cloudformation/arn.ts @@ -1,4 +1,5 @@ import { AwsAccountId, AwsPartition, AwsRegion, FnConcat, Token } from '..'; +import { FnSelect, FnSplit } from '../cloudformation/fn'; /** * An Amazon Resource Name (ARN). @@ -122,6 +123,66 @@ export class Arn extends Token { return result; } + + /** + * Given a Token evaluating to ARN, parses it and returns components. + * + * The ARN cannot be validated, since we don't have the actual value yet + * at the time of this function call. You will have to know the separator + * and the type of ARN. + * + * The resulting `ArnComponents` object will contain tokens for the + * subexpressions of the ARN, not string literals. + * + * WARNING: this function cannot properly parse the complete final + * resourceName (path) out of ARNs that use '/' to both separate the + * 'resource' from the 'resourceName' AND to subdivide the resourceName + * further. For example, in S3 ARNs: + * + * arn:aws:s3:::my_corporate_bucket/path/to/exampleobject.png + * + * After parsing the resourceName will not contain 'path/to/exampleobject.png' + * but simply 'path'. This is a limitation because there is no slicing + * functionality in CloudFormation templates. + * + * @param arn The input token that contains an ARN + * @param sep The separator used to separate resource from resourceName + * @param hasName Whether there is a name component in the ARN at all. + * For example, SNS Topics ARNs have the 'resource' component contain the + * topic name, and no 'resourceName' component. + * @returns an ArnComponents object which allows access to the various + * components of the ARN. + */ + public static parseToken(arn: Token, sep: string = '/', hasName: boolean = true): ArnComponents { + // Arn ARN looks like: + // arn:partition:service:region:account-id:resource + // arn:partition:service:region:account-id:resourcetype/resource + // arn:partition:service:region:account-id:resourcetype:resource + + // We need the 'hasName' argument because {Fn::Select}ing a nonexistent field + // throws an error. + + const components = new FnSplit(':', arn); + + const partition = new FnSelect(1, components); + const service = new FnSelect(2, components); + const region = new FnSelect(3, components); + const account = new FnSelect(4, components); + + if (sep === ':') { + const resource = new FnSelect(5, components); + const resourceName = hasName ? new FnSelect(6, components) : undefined; + + return { partition, service, region, account, resource, resourceName, sep }; + } else { + const lastComponents = new FnSplit(sep, new FnSelect(5, components)); + + const resource = new FnSelect(0, lastComponents); + const resourceName = hasName ? new FnSelect(1, lastComponents) : undefined; + + return { partition, service, region, account, resource, resourceName, sep }; + } + } } export interface ArnComponents { @@ -133,13 +194,13 @@ export interface ArnComponents { * * @default The AWS partition the stack is deployed to. */ - partition?: string; + partition?: any; /** * The service namespace that identifies the AWS product (for example, * 's3', 'iam', 'codepipline'). */ - service: string; + service: any; /** * The region the resource resides in. 
Note that the ARNs for some resources @@ -147,7 +208,7 @@ export interface ArnComponents { * * @default The region the stack is deployed to. */ - region?: string; + region?: any; /** * The ID of the AWS account that owns the resource, without the hyphens. diff --git a/packages/@aws-cdk/core/test/cloudformation/test.arn.ts b/packages/@aws-cdk/core/test/cloudformation/test.arn.ts index 78dbd05f3def7..965cd0dbdb7ca 100644 --- a/packages/@aws-cdk/core/test/cloudformation/test.arn.ts +++ b/packages/@aws-cdk/core/test/cloudformation/test.arn.ts @@ -1,5 +1,5 @@ import { Test } from 'nodeunit'; -import { Arn, ArnComponents, resolve } from '../../lib'; +import { Arn, ArnComponents, resolve, Token } from '../../lib'; export = { 'create from components with defaults'(test: Test) { @@ -187,7 +187,36 @@ export = { }); test.done(); - } + }, + + 'a Token with : separator'(test: Test) { + const theToken = { Ref: 'SomeParameter' }; + const parsed = Arn.parseToken(new Token(() => theToken), ':'); + + test.deepEqual(resolve(parsed.partition), { 'Fn::Select': [ 1, { 'Fn::Split': [ ':', theToken ]} ]}); + test.deepEqual(resolve(parsed.service), { 'Fn::Select': [ 2, { 'Fn::Split': [ ':', theToken ]} ]}); + test.deepEqual(resolve(parsed.region), { 'Fn::Select': [ 3, { 'Fn::Split': [ ':', theToken ]} ]}); + test.deepEqual(resolve(parsed.account), { 'Fn::Select': [ 4, { 'Fn::Split': [ ':', theToken ]} ]}); + test.deepEqual(resolve(parsed.resource), { 'Fn::Select': [ 5, { 'Fn::Split': [ ':', theToken ]} ]}); + test.deepEqual(resolve(parsed.resourceName), { 'Fn::Select': [ 6, { 'Fn::Split': [ ':', theToken ]} ]}); + test.equal(parsed.sep, ':'); + + test.done(); + }, + 'a Token with / separator'(test: Test) { + const theToken = { Ref: 'SomeParameter' }; + const parsed = Arn.parseToken(new Token(() => theToken)); + + test.equal(parsed.sep, '/'); + + // tslint:disable-next-line:max-line-length + test.deepEqual(resolve(parsed.resource), { 'Fn::Select': [ 0, { 'Fn::Split': [ '/', { 'Fn::Select': [ 5, { 'Fn::Split': [ ':', theToken ]} ]} ]} ]}); + // tslint:disable-next-line:max-line-length + test.deepEqual(resolve(parsed.resourceName), { 'Fn::Select': [ 1, { 'Fn::Split': [ '/', { 'Fn::Select': [ 5, { 'Fn::Split': [ ':', theToken ]} ]} ]} ]}); + + test.done(); + } }, -}; + +}; \ No newline at end of file diff --git a/packages/@aws-cdk/kinesis/lib/stream.ts b/packages/@aws-cdk/kinesis/lib/stream.ts index e3c4e542abe7a..97170466dfc6c 100644 --- a/packages/@aws-cdk/kinesis/lib/stream.ts +++ b/packages/@aws-cdk/kinesis/lib/stream.ts @@ -1,6 +1,8 @@ -import { Construct, Output, PolicyStatement, Token } from '@aws-cdk/core'; -import { IIdentityResource } from '@aws-cdk/iam'; +import { Arn, AwsRegion, Construct, FnConcat, HashedAddressingScheme, Output, + PolicyStatement, ServicePrincipal, Stack, Token } from '@aws-cdk/core'; +import { IIdentityResource, Role } from '@aws-cdk/iam'; import * as kms from '@aws-cdk/kms'; +import logs = require('@aws-cdk/logs'); import { cloudformation, StreamArn } from './kinesis.generated'; /** @@ -37,7 +39,7 @@ export interface StreamRefProps { * StreamRef.import(this, 'MyImportedStream', ref); * */ -export abstract class StreamRef extends Construct { +export abstract class StreamRef extends Construct implements logs.ILogSubscriptionDestination { /** * Creates a Stream construct that represents an external stream. 
* @@ -55,11 +57,21 @@ export abstract class StreamRef extends Construct { */ public abstract readonly streamArn: StreamArn; + /** + * The name of the stream + */ + public abstract readonly streamName: StreamName; + /** * Optional KMS encryption key associated with this stream. */ public abstract readonly encryptionKey?: kms.EncryptionKeyRef; + /** + * The role that can be used by CloudWatch logs to write to this stream + */ + private cloudWatchLogsRole?: Role; + /** * Exports this stream from the stack. */ @@ -159,6 +171,62 @@ export abstract class StreamRef extends Construct { ); } + public logSubscriptionDestination(sourceLogGroup: logs.LogGroup): logs.LogSubscriptionDestination { + // Following example from https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/SubscriptionFilters.html#DestinationKinesisExample + if (!this.cloudWatchLogsRole) { + // Create a role to be assumed by CWL that can write to this stream and pass itself. + this.cloudWatchLogsRole = new Role(this, 'CloudWatchLogsCanPutRecords', { + assumedBy: new ServicePrincipal(new FnConcat('logs.', new AwsRegion(), '.amazonaws.com')), + }); + this.cloudWatchLogsRole.addToPolicy(new PolicyStatement().addAction('kinesis:PutRecord').addResource(this.streamArn)); + this.cloudWatchLogsRole.addToPolicy(new PolicyStatement().addAction('iam:PassRole').addResource(this.cloudWatchLogsRole.roleArn)); + } + + // We've now made it possible for CloudWatch events to write to us. In case the LogGroup is in a + // different account, we must add a Destination in between as well. + const sourceStack = Stack.find(sourceLogGroup); + const thisStack = Stack.find(this); + + // Case considered: if both accounts are undefined, we can't make any assumptions. Better + // to assume we don't need to do anything special. + const sameAccount = sourceStack.env.account === thisStack.env.account; + + if (!sameAccount) { + return this.crossAccountLogSubscriptionDestination(sourceLogGroup); + } + + return { arn: this.streamArn, role: this.cloudWatchLogsRole }; + } + + /** + * Generate a CloudWatch Logs Destination and return the properties in the form o a subscription destination + */ + private crossAccountLogSubscriptionDestination(sourceLogGroup: logs.LogGroup): logs.LogSubscriptionDestination { + const sourceStack = Stack.find(sourceLogGroup); + const thisStack = Stack.find(this); + + if (!sourceStack.env.account || !thisStack.env.account) { + throw new Error('SubscriptionFilter stack and Destination stack must either both have accounts defined, or both not have accounts'); + } + + // Take some effort to construct a unique ID for the destination that is unique to the + // combination of (stream, loggroup). + const uniqueId = new HashedAddressingScheme().allocateAddress([sourceLogGroup.path.replace('/', ''), sourceStack.env.account!]); + + // The destination lives in the target account + const dest = new logs.CrossAccountDestination(this, `CWLDestination${uniqueId}`, { + targetArn: this.streamArn, + role: this.cloudWatchLogsRole! 
+ }); + + dest.addToPolicy(new PolicyStatement() + .addAction('logs:PutSubscriptionFilter') + .addAwsAccountPrincipal(sourceStack.env.account) + .addAllResources()); + + return dest.logSubscriptionDestination(sourceLogGroup); + } + private grant(identity: IIdentityResource, actions: { streamActions: string[], keyActions: string[] }) { identity.addToPolicy(new PolicyStatement() .addResource(this.streamArn) @@ -307,12 +375,16 @@ export class StreamName extends Token {} class ImportedStreamRef extends StreamRef { public readonly streamArn: StreamArn; + public readonly streamName: StreamName; public readonly encryptionKey?: kms.EncryptionKeyRef; constructor(parent: Construct, name: string, props: StreamRefProps) { super(parent, name); this.streamArn = props.streamArn; + // Get the name from the ARN + this.streamName = Arn.parseToken(props.streamArn).resourceName; + if (props.encryptionKey) { this.encryptionKey = kms.EncryptionKeyRef.import(parent, 'Key', props.encryptionKey); } else { diff --git a/packages/@aws-cdk/kinesis/package.json b/packages/@aws-cdk/kinesis/package.json index 6a3a57f3745eb..ad5620e03b1e9 100644 --- a/packages/@aws-cdk/kinesis/package.json +++ b/packages/@aws-cdk/kinesis/package.json @@ -41,6 +41,7 @@ "dependencies": { "@aws-cdk/core": "^0.7.3-beta", "@aws-cdk/iam": "^0.7.3-beta", - "@aws-cdk/kms": "^0.7.3-beta" + "@aws-cdk/kms": "^0.7.3-beta", + "@aws-cdk/logs": "^0.7.3-beta" } } diff --git a/packages/@aws-cdk/kinesis/test/test.subscriptiondestination.ts b/packages/@aws-cdk/kinesis/test/test.subscriptiondestination.ts new file mode 100644 index 0000000000000..28a13131af574 --- /dev/null +++ b/packages/@aws-cdk/kinesis/test/test.subscriptiondestination.ts @@ -0,0 +1,80 @@ +import { expect, haveResource } from '@aws-cdk/assert'; +import { Stack } from '@aws-cdk/core'; +import { FilterPattern, LogGroup, SubscriptionFilter } from '@aws-cdk/logs'; +import { Test } from 'nodeunit'; +import { Stream } from '../lib'; + +export = { + 'stream can be subscription destination'(test: Test) { + // GIVEN + const stack = new Stack(); + const stream = new Stream(stack, 'MyStream'); + const logGroup = new LogGroup(stack, 'LogGroup'); + + // WHEN + new SubscriptionFilter(stack, 'Subscription', { + logGroup, + destination: stream, + filterPattern: FilterPattern.allEvents() + }); + + // THEN: subscription target is Stream + expect(stack).to(haveResource('AWS::Logs::SubscriptionFilter', { + DestinationArn: { "Fn::GetAtt": [ "MyStream5C050E93", "Arn" ] }, + RoleArn: { "Fn::GetAtt": [ "MyStreamCloudWatchLogsCanPutRecords58498490", "Arn" ] }, + })); + + // THEN: we have a role to write to the Lambda + expect(stack).to(haveResource('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: [{ + Action: "sts:AssumeRole", + Principal: { Service: { "Fn::Join": ["", ["logs.", {Ref: "AWS::Region"}, ".amazonaws.com"]] }} + }], + } + })); + + expect(stack).to(haveResource('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Action: "kinesis:PutRecord", + Effect: "Allow", + Resource: { "Fn::GetAtt": [ "MyStream5C050E93", "Arn" ] } + }, + { + Action: "iam:PassRole", + Effect: "Allow", + Resource: { "Fn::GetAtt": [ "MyStreamCloudWatchLogsCanPutRecords58498490", "Arn" ] } + } + ], + } + })); + + test.done(); + }, + + 'cross-account stream can be subscription destination with Destination'(test: Test) { + // GIVEN + const sourceStack = new Stack(undefined, undefined, { env: { account: '12345' }}); + const logGroup = new LogGroup(sourceStack, 'LogGroup'); + + const destStack = new 
Stack(undefined, undefined, { env: { account: '67890' }}); + const stream = new Stream(destStack, 'MyStream'); + + // WHEN + new SubscriptionFilter(sourceStack, 'Subscription', { + logGroup, + destination: stream, + filterPattern: FilterPattern.allEvents() + }); + + // THEN: the source stack has a Destination object that the subscription points to + expect(destStack).to(haveResource('AWS::Logs::Destination', { + TargetArn: { "Fn::GetAtt": [ "MyStream5C050E93", "Arn" ] }, + RoleArn: { "Fn::GetAtt": [ "MyStreamCloudWatchLogsCanPutRecords58498490", "Arn" ] }, + })); + + test.done(); + } +}; diff --git a/packages/@aws-cdk/lambda/lib/lambda-ref.ts b/packages/@aws-cdk/lambda/lib/lambda-ref.ts index 737953f6921e0..7224273fa1135 100644 --- a/packages/@aws-cdk/lambda/lib/lambda-ref.ts +++ b/packages/@aws-cdk/lambda/lib/lambda-ref.ts @@ -1,8 +1,9 @@ import { Metric, MetricCustomization } from '@aws-cdk/cloudwatch'; -import { AccountPrincipal, Arn, Construct, FnSelect, FnSplit, PolicyPrincipal, - PolicyStatement, resolve, ServicePrincipal, Token } from '@aws-cdk/core'; +import { AccountPrincipal, Arn, AwsRegion, Construct, FnConcat, FnSelect, FnSplit, + PolicyPrincipal, PolicyStatement, resolve, ServicePrincipal, Token } from '@aws-cdk/core'; import { EventRuleTarget, IEventRuleTarget } from '@aws-cdk/events'; import { Role } from '@aws-cdk/iam'; +import logs = require('@aws-cdk/logs'); import { cloudformation, FunctionArn } from './lambda.generated'; import { LambdaPermission } from './permission'; @@ -23,7 +24,7 @@ export interface LambdaRefProps { role?: Role; } -export abstract class LambdaRef extends Construct implements IEventRuleTarget { +export abstract class LambdaRef extends Construct implements IEventRuleTarget, logs.ILogSubscriptionDestination { /** * Creates a Lambda function object which represents a function not defined * within this stack. @@ -109,10 +110,15 @@ export abstract class LambdaRef extends Construct implements IEventRuleTarget { /** * Indicates if the resource policy that allows CloudWatch events to publish - * notifications to this topic have been added. + * notifications to this lambda have been added. */ private eventRuleTargetPolicyAdded = false; + /** + * Indicates if the policy that allows CloudWatch logs to publish to this lambda has been added. + */ + private logSubscriptionDestinationPolicyAddedFor: logs.LogGroupArn[] = []; + /** * Adds a permission to the Lambda resource policy. * @param name A name for the permission construct @@ -212,6 +218,23 @@ export abstract class LambdaRef extends Construct implements IEventRuleTarget { return this.metric('Throttles', { statistic: 'sum', ...props }); } + public logSubscriptionDestination(sourceLogGroup: logs.LogGroup): logs.LogSubscriptionDestination { + const arn = sourceLogGroup.logGroupArn; + + if (this.logSubscriptionDestinationPolicyAddedFor.indexOf(arn) === -1) { + // NOTE: the use of {AWS::Region} limits this to the same region, which shouldn't really be an issue, + // since the Lambda must be in the same region as the SubscriptionFilter anyway. + // + // (Wildcards in principals are unfortunately not supported. 
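+ // We therefore use the region-specific 'logs.<region>.amazonaws.com' service principal and scope the permission to the source log group via 'sourceArn'.)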
+ this.addPermission('InvokedByCloudWatchLogs', { + principal: new ServicePrincipal(new FnConcat('logs.', new AwsRegion(), '.amazonaws.com')), + sourceArn: arn + }); + this.logSubscriptionDestinationPolicyAddedFor.push(arn); + } + return { arn: this.functionArn }; + } + private parsePermissionPrincipal(principal?: PolicyPrincipal) { if (!principal) { return undefined; diff --git a/packages/@aws-cdk/lambda/package.json b/packages/@aws-cdk/lambda/package.json index 5f9730a0b8bd5..acbbd1d534a63 100644 --- a/packages/@aws-cdk/lambda/package.json +++ b/packages/@aws-cdk/lambda/package.json @@ -44,6 +44,7 @@ "@aws-cdk/core": "^0.7.3-beta", "@aws-cdk/events": "^0.7.3-beta", "@aws-cdk/iam": "^0.7.3-beta", - "@aws-cdk/s3": "^0.7.3-beta" + "@aws-cdk/s3": "^0.7.3-beta", + "@aws-cdk/logs": "^0.7.3-beta" } } diff --git a/packages/@aws-cdk/lambda/test/test.subscriptiondestination.ts b/packages/@aws-cdk/lambda/test/test.subscriptiondestination.ts new file mode 100644 index 0000000000000..d986d97722163 --- /dev/null +++ b/packages/@aws-cdk/lambda/test/test.subscriptiondestination.ts @@ -0,0 +1,39 @@ +import { expect, haveResource } from '@aws-cdk/assert'; +import { Stack } from '@aws-cdk/core'; +import { FilterPattern, LogGroup, SubscriptionFilter } from '@aws-cdk/logs'; +import { Test } from 'nodeunit'; +import { Lambda, LambdaInlineCode, LambdaRuntime } from '../lib'; + +export = { + 'lambda can be used as metric subscription destination'(test: Test) { + // GIVEN + const stack = new Stack(); + const lambda = new Lambda(stack, 'MyLambda', { + code: new LambdaInlineCode('foo'), + handler: 'index.handler', + runtime: LambdaRuntime.NodeJS610, + }); + const logGroup = new LogGroup(stack, 'LogGroup'); + + // WHEN + new SubscriptionFilter(stack, 'Subscription', { + logGroup, + destination: lambda, + filterPattern: FilterPattern.allEvents() + }); + + // THEN: subscription target is Lambda + expect(stack).to(haveResource('AWS::Logs::SubscriptionFilter', { + DestinationArn: { "Fn::GetAtt": [ "MyLambdaCCE802FB", "Arn" ] }, + })); + + // THEN: Lambda has permissions to be invoked by CWL + expect(stack).to(haveResource('AWS::Lambda::Permission', { + Action: "lambda:InvokeFunction", + FunctionName: { Ref: "MyLambdaCCE802FB" }, + Principal: { "Fn::Join": ["", ["logs.", {Ref: "AWS::Region"}, ".amazonaws.com"]] } + })); + + test.done(); + } +}; \ No newline at end of file diff --git a/packages/@aws-cdk/logs/.gitignore b/packages/@aws-cdk/logs/.gitignore index bec015f14d203..760493279d0cd 100644 --- a/packages/@aws-cdk/logs/.gitignore +++ b/packages/@aws-cdk/logs/.gitignore @@ -1,9 +1,10 @@ +*.d.ts +*.generated.ts *.js *.js.map -*.d.ts +.jsii +dist +lib/generated/resources.ts +node_modules tsconfig.json tslint.json -node_modules -*.generated.ts -dist -.jsii diff --git a/packages/@aws-cdk/logs/README.md b/packages/@aws-cdk/logs/README.md new file mode 100644 index 0000000000000..65d69e6dd20d2 --- /dev/null +++ b/packages/@aws-cdk/logs/README.md @@ -0,0 +1,197 @@ +## AWS CloudWatch Logs Construct Library + +This library supplies constructs for working with CloudWatch Logs. + +### Log Groups/Streams + +The basic unit of CloudWatch is a *Log Group*. Every log group typically has the +same kind of data logged to it, in the same format. If there are multiple +applications or services logging into the Log Group, each of them creates a new +*Log Stream*. + +Every log operation creates a "log event", which can consist of a simple string +or a single-line JSON object. 
JSON objects have the advantage that they afford
+more filtering abilities (see below).
+
+The only configurable attribute of a log group is the retention period, which
+determines how long the events in the log group are kept before they expire and
+are deleted.
+
+The default retention period, if not supplied, is 2 years (730 days), but it can
+be set to any number of days, or `Infinity` to keep the data in the log group
+forever.
+
+[retention example](test/example.retention.lit.ts)
+
+### Subscriptions and Destinations
+
+Log events matching a particular filter can be sent to either a Lambda function
+or a Kinesis stream.
+
+If the Kinesis stream lives in a different account, a `CrossAccountDestination`
+object needs to be added in the destination account, which will act as a proxy
+for the remote Kinesis stream. This object is automatically created for you
+if you use the CDK Kinesis library.
+
+Create a `SubscriptionFilter`, initialize it with an appropriate `FilterPattern` (see
+below) and supply the intended destination:
+
+```ts
+const lambda = new Lambda(this, 'Lambda', { ... });
+const logGroup = new LogGroup(this, 'LogGroup', { ... });
+
+new SubscriptionFilter(this, 'Subscription', {
+  logGroup,
+  destination: lambda,
+  filterPattern: FilterPattern.allTerms("ERROR", "MainThread")
+});
+```
+
+### Metric Filters
+
+CloudWatch Logs can extract and emit metrics based on a textual log stream.
+Depending on your needs, this may be a more convenient way of generating metrics
+for your application than making calls to CloudWatch Metrics yourself.
+
+A `MetricFilter` either emits a fixed number every time it sees a log event
+matching a particular pattern (see below), or extracts a number from the log
+event and uses that as the metric value.
+
+Example:
+
+[metricfilter example](test/integ.metricfilter.lit.ts)
+
+Remember that if you want to use a value from the log event as the metric value,
+you must mention it in your pattern somewhere.
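+
+For reference, the linked example boils down to roughly the following sketch
+(the namespace, metric name and JSON field are illustrative):
+
+```ts
+// Emit the value of the '$.latency' field as metric 'MyApp/Latency' for every
+// matching JSON log event; note the field is also referenced in the pattern.
+new MetricFilter(this, 'MetricFilter', {
+  logGroup,
+  metricNamespace: 'MyApp',
+  metricName: 'Latency',
+  filterPattern: FilterPattern.exists('$.latency'),
+  metricValue: '$.latency'
+});
+```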
+
+### Patterns
+
+Patterns describe which log events match a subscription or metric filter. There
+are three types of patterns:
+
+* Text patterns
+* JSON patterns
+* Space-delimited table patterns
+
+All patterns are constructed by using static functions on the `FilterPattern`
+class.
+
+In addition to the patterns above, the following special patterns exist:
+
+* `FilterPattern.allEvents()`: matches all log events.
+* `FilterPattern.literal(string)`: if you already know what pattern expression to
+  use, this function takes a string and will use that as the log pattern. For
+  more information, see the [Filter and Pattern
+  Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html).
+
+#### Text Patterns
+
+Text patterns match if the literal strings appear in the text form of the log
+line.
+
+* `FilterPattern.allTerms(term, term, ...)`: matches if all of the given terms
+  (substrings) appear in the log event.
+* `FilterPattern.anyTerm(term, term, ...)`: matches if any of the given terms
+  (substrings) appear in the log event.
+* `FilterPattern.anyTermGroup([term, term, ...], [term, term, ...], ...)`: matches
+  if all of the terms in any one of the groups (specified as arrays) appear in
+  the log event. This is an OR match.
+
+Examples:
+
+```ts
+// Search for lines that contain both "ERROR" and "MainThread"
+const pattern1 = FilterPattern.allTerms('ERROR', 'MainThread');
+
+// Search for lines that either contain both "ERROR" and "MainThread", or
+// both "WARN" and "Deadlock".
+const pattern2 = FilterPattern.anyTermGroup(
+  ['ERROR', 'MainThread'],
+  ['WARN', 'Deadlock'],
+);
+```
+
+#### JSON Patterns
+
+JSON patterns apply if the log event is the JSON representation of an object
+(without any other characters, so it cannot include a prefix such as a timestamp
+or log level). JSON patterns can make comparisons on the values inside the
+fields.
+
+* **Strings**: the comparison operators allowed for strings are `=` and `!=`.
+  String values can start or end with a `*` wildcard.
+* **Numbers**: the comparison operators allowed for numbers are `=`, `!=`,
+  `<`, `<=`, `>`, `>=`.
+
+Fields in the JSON structure are identified by taking the complete object as `$`
+and then descending into it, such as `$.field` or `$.list[0].field`.
+
+* `FilterPattern.stringValue(field, comparison, string)`: matches if the given
+  field compares as indicated with the given string value.
+* `FilterPattern.numberValue(field, comparison, number)`: matches if the given
+  field compares as indicated with the given numerical value.
+* `FilterPattern.isNull(field)`: matches if the given field exists and has the
+  value `null`.
+* `FilterPattern.notExists(field)`: matches if the given field is not in the JSON
+  structure.
+* `FilterPattern.exists(field)`: matches if the given field is in the JSON
+  structure.
+* `FilterPattern.booleanValue(field, boolean)`: matches if the given field
+  is exactly the given boolean value.
+* `FilterPattern.all(jsonPattern, jsonPattern, ...)`: matches if all of the
+  given JSON patterns match. This makes an AND combination of the given
+  patterns.
+* `FilterPattern.any(jsonPattern, jsonPattern, ...)`: matches if any of the
+  given JSON patterns match. This makes an OR combination of the given
+  patterns.
+
+Example:
+
+```ts
+// Search for all events where the component field is equal to
+// "HttpServer" and either error is true or the latency is higher
+// than 1000.
+const pattern = FilterPattern.all(
+  FilterPattern.stringValue('$.component', '=', 'HttpServer'),
+  FilterPattern.any(
+    FilterPattern.booleanValue('$.error', true),
+    FilterPattern.numberValue('$.latency', '>', 1000)
+  ));
+```
+
+#### Space-delimited table patterns
+
+If the log events are rows of a space-delimited table, this pattern can be used
+to identify the columns in that structure and add conditions on any of them. The
+canonical example where you would apply this type of pattern is Apache server
+logs.
+
+Text that is surrounded by `"..."` quotes or `[...]` square brackets will
+be treated as one column.
+
+* `FilterPattern.spaceDelimited(column, column, ...)`: construct a
+  `SpaceDelimitedTextPattern` object with the indicated columns. The columns
+  map one-by-one to the columns found in the log event. The string `"..."` may
+  be used to specify an arbitrary number of unnamed columns anywhere in the
+  name list (but may only be specified once).
+
+After constructing a `SpaceDelimitedTextPattern`, you can use the following
+two members to add restrictions:
+
+* `pattern.whereString(field, comparison, string)`: add a string condition.
+  The rules are the same as for JSON patterns.
+* `pattern.whereNumber(field, comparison, number)`: add a numerical condition.
+  The rules are the same as for JSON patterns.
+
+Multiple restrictions can be added on the same column; they must all apply.
+
+Example:
+
+```ts
+// Search for all events where the component is "HttpServer" and the
+// result code is not equal to 200.
+const pattern = FilterPattern.spaceDelimited('time', 'component', '...', 'result_code', 'latency') + .whereString('component', '=', 'HttpServer') + .whereNumber('result_code', '!=', 200); +``` diff --git a/packages/@aws-cdk/logs/lib/cross-account-destination.ts b/packages/@aws-cdk/logs/lib/cross-account-destination.ts new file mode 100644 index 0000000000000..c375b305d7c78 --- /dev/null +++ b/packages/@aws-cdk/logs/lib/cross-account-destination.ts @@ -0,0 +1,100 @@ +import cdk = require('@aws-cdk/core'); +import iam = require('@aws-cdk/iam'); +import { LogGroup } from './log-group'; +import { cloudformation, DestinationArn } from './logs.generated'; +import { ILogSubscriptionDestination, LogSubscriptionDestination } from './subscription-filter'; + +export interface CrossAccountDestinationProps { + /** + * The name of the log destination. + * + * @default Automatically generated + */ + destinationName?: string; + + /** + * The role to assume that grants permissions to write to 'target'. + * + * The role must be assumable by 'logs.{REGION}.amazonaws.com'. + */ + role: iam.Role; + + /** + * The log destination target's ARN + */ + targetArn: cdk.Arn; +} + +/** + * A new CloudWatch Logs Destination for use in cross-account scenarios + * + * Log destinations can be used to subscribe a Kinesis stream in a different + * account to a CloudWatch Subscription. A Kinesis stream in the same account + * can be subscribed directly. + * + * The @aws-cdk/kinesis library takes care of this automatically; you shouldn't + * need to bother with this class. + */ +export class CrossAccountDestination extends cdk.Construct implements ILogSubscriptionDestination { + /** + * Policy object of this CrossAccountDestination object + */ + public readonly policyDocument: cdk.PolicyDocument = new cdk.PolicyDocument(); + + /** + * The name of this CrossAccountDestination object + */ + public readonly destinationName: DestinationName; + + /** + * The ARN of this CrossAccountDestination object + */ + public readonly destinationArn: DestinationArn; + + /** + * The inner resource + */ + private readonly resource: cloudformation.DestinationResource; + + constructor(parent: cdk.Construct, id: string, props: CrossAccountDestinationProps) { + super(parent, id); + + this.policyDocument = new cdk.PolicyDocument(); + + // In the underlying model, the name is not optional, but we make it so anyway. + const destinationName = props.destinationName || new cdk.Token(() => this.generateUniqueName()); + + this.resource = new cloudformation.DestinationResource(this, 'Resource', { + destinationName, + destinationPolicy: new cdk.Token(() => !this.policyDocument.isEmpty ? JSON.stringify(this.policyDocument.resolve()) : ""), + roleArn: props.role.roleArn, + targetArn: props.targetArn + }); + + this.destinationArn = this.resource.destinationArn; + this.destinationName = this.resource.ref; + } + + public addToPolicy(statement: cdk.PolicyStatement) { + this.policyDocument.addStatement(statement); + } + + public logSubscriptionDestination(_sourceLogGroup: LogGroup): LogSubscriptionDestination { + return { arn: this.destinationArn }; + } + + /** + * Generate a unique Destination name in case the user didn't supply one + */ + private generateUniqueName(): string { + // Combination of stack name and LogicalID, which are guaranteed to be unique. 
+ const stack = cdk.Stack.find(this); + return stack.name + '-' + this.resource.logicalId; + } +} + +/** + * Name of a CloudWatch Destination + */ +export class DestinationName extends cdk.Token { +} \ No newline at end of file diff --git a/packages/@aws-cdk/logs/lib/index.ts b/packages/@aws-cdk/logs/lib/index.ts index ac162b75659a7..bf626238843c3 100644 --- a/packages/@aws-cdk/logs/lib/index.ts +++ b/packages/@aws-cdk/logs/lib/index.ts @@ -1,2 +1,9 @@ +export * from './cross-account-destination'; +export * from './log-group'; +export * from './log-stream'; +export * from './metric-filter'; +export * from './pattern'; +export * from './subscription-filter'; + // AWS::Logs CloudFormation Resources: export * from './logs.generated'; diff --git a/packages/@aws-cdk/logs/lib/log-group.ts b/packages/@aws-cdk/logs/lib/log-group.ts new file mode 100644 index 0000000000000..4bb9360fbee6c --- /dev/null +++ b/packages/@aws-cdk/logs/lib/log-group.ts @@ -0,0 +1,185 @@ +import cdk = require('@aws-cdk/core'); +import { LogStream } from './log-stream'; +import { cloudformation, LogGroupArn } from './logs.generated'; +import { MetricFilter } from './metric-filter'; +import { IFilterPattern } from './pattern'; +import { ILogSubscriptionDestination, SubscriptionFilter } from './subscription-filter'; + +/** + * Properties for a new LogGroup + */ +export interface LogGroupProps { + /** + * Name of the log group. + * + * @default Automatically generated + */ + logGroupName?: string; + + /** + * How long, in days, the log contents will be retained. + * + * To retain all logs, set this value to Infinity. + * + * @default 730 days (2 years) + */ + retentionDays?: number; +} + +/** + * A new CloudWatch Log Group + */ +export class LogGroup extends cdk.Construct { + /** + * The ARN of this log group + */ + public readonly logGroupArn: LogGroupArn; + + /** + * The name of this log group + */ + public readonly logGroupName: LogGroupName; + + constructor(parent: cdk.Construct, id: string, props: LogGroupProps = {}) { + super(parent, id); + + let retentionInDays = props.retentionDays; + if (retentionInDays === undefined) { retentionInDays = 730; } + if (retentionInDays === Infinity) { retentionInDays = undefined; } + + if (retentionInDays !== undefined && retentionInDays <= 0) { + throw new Error(`retentionInDays must be positive, got ${retentionInDays}`); + } + + const resource = new cloudformation.LogGroupResource(this, 'Resource', { + logGroupName: props.logGroupName, + retentionInDays, + }); + + this.logGroupArn = resource.logGroupArn; + this.logGroupName = resource.ref; + } + + /** + * Create a new Log Stream for this Log Group + * + * @param parent Parent construct + * @param id Unique identifier for the construct in its parent + * @param props Properties for creating the LogStream + */ + public newStream(parent: cdk.Construct, id: string, props: NewLogStreamProps = {}): LogStream { + return new LogStream(parent, id, { + logGroup: this, + ...props + }); + } + + /** + * Create a new Subscription Filter on this Log Group + * + * @param parent Parent construct + * @param id Unique identifier for the construct in its parent + * @param props Properties for creating the SubscriptionFilter + */ + public newSubscriptionFilter(parent: cdk.Construct, id: string, props: NewSubscriptionFilterProps): SubscriptionFilter { + return new SubscriptionFilter(parent, id, { + logGroup: this, + ...props + }); + } + + /** + * Create a new Metric Filter on this Log Group + * + * @param parent Parent construct + * @param id Unique 
identifier for the construct in its parent + * @param props Properties for creating the MetricFilter + */ + public newMetricFilter(parent: cdk.Construct, id: string, props: NewMetricFilterProps): MetricFilter { + return new MetricFilter(parent, id, { + logGroup: this, + ...props + }); + } +} + +/** + * Name of a log group + */ +export class LogGroupName extends cdk.Token { +} + +/** + * Properties for a new LogStream created from a LogGroup + */ +export interface NewLogStreamProps { + /** + * The name of the log stream to create. + * + * The name must be unique within the log group. + * + * @default Automatically generated + */ + logStreamName?: string; +} + +/** + * Properties for a new SubscriptionFilter created from a LogGroup + */ +export interface NewSubscriptionFilterProps { + /** + * The destination to send the filtered events to. + * + * For example, a Kinesis stream or a Lambda function. + */ + destination: ILogSubscriptionDestination; + + /** + * Log events matching this pattern will be sent to the destination. + */ + filterPattern: IFilterPattern; +} + +/** + * Properties for a MetricFilter created from a LogGroup + */ +export interface NewMetricFilterProps { + /** + * Pattern to search for log events. + */ + filterPattern: IFilterPattern; + + /** + * The namespace of the metric to emit. + */ + metricNamespace: string; + + /** + * The name of the metric to emit. + */ + metricName: string; + + /** + * The value to emit for the metric. + * + * Can either be a literal number (typically "1"), or the name of a field in the structure + * to take the value from the matched event. If you are using a field value, the field + * value must have been matched using the pattern. + * + * If you want to specify a field from a matched JSON structure, use '$.fieldName', + * and make sure the field is in the pattern (if only as '$.fieldName = *'). + * + * If you want to specify a field from a matched space-delimited structure, + * use '$fieldName'. + * + * @default "1" + */ + metricValue?: string; + + /** + * The value to emit if the pattern does not match a particular event. + * + * @default No metric emitted. + */ + defaultValue?: number; +} \ No newline at end of file diff --git a/packages/@aws-cdk/logs/lib/log-stream.ts b/packages/@aws-cdk/logs/lib/log-stream.ts new file mode 100644 index 0000000000000..9c7c361b450eb --- /dev/null +++ b/packages/@aws-cdk/logs/lib/log-stream.ts @@ -0,0 +1,49 @@ +import cdk = require('@aws-cdk/core'); +import { LogGroup } from './log-group'; +import { cloudformation } from './logs.generated'; + +/** + * Properties for a new LogStream + */ +export interface LogStreamProps { + /** + * The log group to create a log stream for. + */ + logGroup: LogGroup; + + /** + * The name of the log stream to create. + * + * The name must be unique within the log group. 
+ * + * @default Automatically generated + */ + logStreamName?: string; +} + +/** + * A new Log Stream in a Log Group + */ +export class LogStream extends cdk.Construct { + /** + * The name of this log stream + */ + public readonly logStreamName: LogStreamName; + + constructor(parent: cdk.Construct, id: string, props: LogStreamProps) { + super(parent, id); + + const resource = new cloudformation.LogStreamResource(this, 'Resource', { + logGroupName: props.logGroup.logGroupName, + logStreamName: props.logStreamName + }); + + this.logStreamName = resource.ref; + } +} + +/** + * The name of a log stream + */ +export class LogStreamName extends cdk.Token { +} diff --git a/packages/@aws-cdk/logs/lib/metric-filter.ts b/packages/@aws-cdk/logs/lib/metric-filter.ts new file mode 100644 index 0000000000000..ff3742ed7c81d --- /dev/null +++ b/packages/@aws-cdk/logs/lib/metric-filter.ts @@ -0,0 +1,73 @@ +import cdk = require('@aws-cdk/core'); +import { LogGroup } from './log-group'; +import { cloudformation } from './logs.generated'; +import { IFilterPattern } from './pattern'; + +/** + * Properties for a MetricFilter + */ +export interface MetricFilterProps { + /** + * The log group to create the filter on. + */ + logGroup: LogGroup; + + /** + * Pattern to search for log events. + */ + filterPattern: IFilterPattern; + + /** + * The namespace of the metric to emit. + */ + metricNamespace: string; + + /** + * The name of the metric to emit. + */ + metricName: string; + + /** + * The value to emit for the metric. + * + * Can either be a literal number (typically "1"), or the name of a field in the structure + * to take the value from the matched event. If you are using a field value, the field + * value must have been matched using the pattern. + * + * If you want to specify a field from a matched JSON structure, use '$.fieldName', + * and make sure the field is in the pattern (if only as '$.fieldName = *'). + * + * If you want to specify a field from a matched space-delimited structure, + * use '$fieldName'. + * + * @default "1" + */ + metricValue?: string; + + /** + * The value to emit if the pattern does not match a particular event. + * + * @default No metric emitted. + */ + defaultValue?: number; +} + +/** + * A filter that extracts information from CloudWatch Logs and emits to CloudWatch Metrics + */ +export class MetricFilter extends cdk.Construct { + constructor(parent: cdk.Construct, id: string, props: MetricFilterProps) { + super(parent, id); + + new cloudformation.MetricFilterResource(this, 'Resource', { + logGroupName: props.logGroup.logGroupName, + filterPattern: props.filterPattern.logPatternString, + metricTransformations: [{ + metricNamespace: props.metricNamespace, + metricName: props.metricName, + metricValue: props.metricValue !== undefined ? props.metricValue : '1', + defaultValue: props.defaultValue + }] + }); + } +} diff --git a/packages/@aws-cdk/logs/lib/pattern.ts b/packages/@aws-cdk/logs/lib/pattern.ts new file mode 100644 index 0000000000000..90a79572a5c96 --- /dev/null +++ b/packages/@aws-cdk/logs/lib/pattern.ts @@ -0,0 +1,456 @@ +// Implementation of metric patterns + +/** + * Interface for objects that can render themselves to log patterns. + */ +export interface IFilterPattern { + readonly logPatternString: string; +} + +/** + * Base class for patterns that only match JSON log events. 
+ */ +export abstract class JSONPattern implements IFilterPattern { + // This is a separate class so we have some type safety where users can't + // combine text patterns and JSON patterns with an 'and' operation. + constructor(public readonly jsonPatternString: string) { + } + + public get logPatternString(): string { + return '{ ' + this.jsonPatternString + ' }'; + } +} + +/** + * A collection of static methods to generate appropriate ILogPatterns + */ +export class FilterPattern { + + /** + * Use the given string as log pattern. + * + * See https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html + * for information on writing log patterns. + * + * @param logPatternString The pattern string to use. + */ + public static literal(logPatternString: string): IFilterPattern { + return new LiteralLogPattern(logPatternString); + } + + /** + * A log pattern that matches all events. + */ + public static allEvents(): IFilterPattern { + return new LiteralLogPattern(""); + } + + /** + * A log pattern that matches if all the strings given appear in the event. + * + * @param terms The words to search for. All terms must match. + */ + public static allTerms(...terms: string[]): IFilterPattern { + return new TextLogPattern([terms]); + } + + /** + * A log pattern that matches if any of the strings given appear in the event. + * + * @param terms The words to search for. Any terms must match. + */ + public static anyTerm(...terms: string[]): IFilterPattern { + return new TextLogPattern(terms.map(t => [t])); + } + + /** + * A log pattern that matches if any of the given term groups matches the event. + * + * A term group matches an event if all the terms in it appear in the event string. + * + * @param termGroups A list of term groups to search for. Any one of the clauses must match. + */ + public static anyTermGroup(...termGroups: string[][]): IFilterPattern { + return new TextLogPattern(termGroups); + } + + /** + * A JSON log pattern that compares string values. + * + * This pattern only matches if the event is a JSON event, and the indicated field inside + * compares with the string value. + * + * Use '$' to indicate the root of the JSON structure. The comparison operator can only + * compare equality or inequality. The '*' wildcard may appear in the value may at the + * start or at the end. + * + * For more information, see: + * + * https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html + * + * @param jsonField Field inside JSON. Example: "$.myField" + * @param comparison Comparison to carry out. Either = or !=. + * @param value The string value to compare to. May use '*' as wildcard at start or end of string. + */ + public static stringValue(jsonField: string, comparison: string, value: string): JSONPattern { + return new JSONStringPattern(jsonField, comparison, value); + } + + /** + * A JSON log pattern that compares numerical values. + * + * This pattern only matches if the event is a JSON event, and the indicated field inside + * compares with the value in the indicated way. + * + * Use '$' to indicate the root of the JSON structure. The comparison operator can only + * compare equality or inequality. The '*' wildcard may appear in the value may at the + * start or at the end. + * + * For more information, see: + * + * https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html + * + * @param jsonField Field inside JSON. Example: "$.myField" + * @param comparison Comparison to carry out. One of =, !=, <, <=, >, >=. 
+ * @param value The numerical value to compare to + */ + public static numberValue(jsonField: string, comparison: string, value: number): JSONPattern { + return new JSONNumberPattern(jsonField, comparison, value); + } + + /** + * A JSON log pattern that matches if the field exists and has the special value 'null'. + * + * @param jsonField Field inside JSON. Example: "$.myField" + */ + public static isNull(jsonField: string): JSONPattern { + return new JSONPostfixPattern(jsonField, 'IS NULL'); + } + + /** + * A JSON log pattern that matches if the field does not exist. + * + * @param jsonField Field inside JSON. Example: "$.myField" + */ + public static notExists(jsonField: string): JSONPattern { + return new JSONPostfixPattern(jsonField, 'NOT EXISTS'); + } + + /** + * A JSON log patter that matches if the field exists. + * + * This is a readable convenience wrapper over 'field = *' + * + * @param jsonField Field inside JSON. Example: "$.myField" + */ + public static exists(jsonField: string): JSONPattern { + return new JSONStringPattern(jsonField, '=', '*'); + } + + /** + * A JSON log pattern that matches if the field exists and equals the boolean value. + * + * @param jsonField Field inside JSON. Example: "$.myField" + * @param value The value to match + */ + public static booleanValue(jsonField: string, value: boolean): JSONPattern { + return new JSONPostfixPattern(jsonField, value ? 'IS TRUE' : 'IS FALSE'); + } + + /** + * A JSON log pattern that matches if all given JSON log patterns match + */ + public static all(...patterns: JSONPattern[]): JSONPattern { + if (patterns.length === 0) { throw new Error('Must supply at least one pattern, or use allEvents() to match all events.'); } + if (patterns.length === 1) { return patterns[0]; } + return new JSONAggregatePattern('&&', patterns); + } + + /** + * A JSON log pattern that matches if any of the given JSON log patterns match + */ + public static any(...patterns: JSONPattern[]): JSONPattern { + if (patterns.length === 0) { throw new Error('Must supply at least one pattern'); } + if (patterns.length === 1) { return patterns[0]; } + return new JSONAggregatePattern('||', patterns); + } + + /** + * A space delimited log pattern matcher. + * + * The log event is divided into space-delimited columns (optionally + * enclosed by "" or [] to capture spaces into column values), and names + * are given to each column. + * + * '...' may be specified once to match any number of columns. + * + * Afterwards, conditions may be added to individual columns. + * + * @param columns The columns in the space-delimited log stream. + */ + public static spaceDelimited(...columns: string[]): SpaceDelimitedTextPattern { + return SpaceDelimitedTextPattern.construct(columns); + } +} + +/** + * Use a string literal as a log pattern + */ +class LiteralLogPattern implements IFilterPattern { + constructor(public readonly logPatternString: string) { + } +} + +/** + * Search for a set of set of terms + */ +class TextLogPattern implements IFilterPattern { + public readonly logPatternString: string; + + constructor(clauses: string[][]) { + const quotedClauses = clauses.map(terms => terms.map(quoteTerm).join(' ')); + if (quotedClauses.length === 1) { + this.logPatternString = quotedClauses[0]; + } else { + this.logPatternString = quotedClauses.map(alt => '?' 
+ alt).join(' '); + } + } +} + +/** + * A string comparison for JSON values + */ +class JSONStringPattern extends JSONPattern { + public constructor(jsonField: string, comparison: string, value: string) { + comparison = validateStringOperator(comparison); + super(`${jsonField} ${comparison} ${quoteTerm(value)}`); + } +} + +/** + * A number comparison for JSON values + */ +class JSONNumberPattern extends JSONPattern { + public constructor(jsonField: string, comparison: string, value: number) { + comparison = validateNumericalOperator(comparison); + super(`${jsonField} ${comparison} ${value}`); + } +} + +/** + * A postfix operator for JSON patterns + */ +class JSONPostfixPattern extends JSONPattern { + public constructor(jsonField: string, postfix: string) { + // No validation, we assume these are generated by trusted factory functions + super(`${jsonField} ${postfix}`); + } +} + +/** + * Combines multiple other JSON patterns with an operator + */ +class JSONAggregatePattern extends JSONPattern { + public constructor(operator: string, patterns: JSONPattern[]) { + if (operator !== '&&' && operator !== '||') { + throw new Error('Operator must be one of && or ||'); + } + + const clauses = patterns.map(p => '(' + p.jsonPatternString + ')'); + + super(clauses.join(` ${operator} `)); + } +} + +export type RestrictionMap = {[column: string]: ColumnRestriction[]}; + +const COL_ELLIPSIS = '...'; + +/** + * Space delimited text pattern + */ +export class SpaceDelimitedTextPattern implements IFilterPattern { + /** + * Construct a new instance of a space delimited text pattern + * + * Since this class must be public, we can't rely on the user only creating it through + * the `LogPattern.spaceDelimited()` factory function. We must therefore validate the + * argument in the constructor. Since we're returning a copy on every mutation, and we + * don't want to re-validate the same things on every construction, we provide a limited + * set of mutator functions and only validate the new data every time. + */ + public static construct(columns: string[]) { + // Validation happens here because a user could instantiate this object directly without + // going through the factory + for (const column of columns) { + if (!validColumnName(column)) { + throw new Error(`Invalid column name: ${column}`); + } + } + + if (sum(columns.map(c => c === COL_ELLIPSIS ? 1 : 0)) > 1) { + throw new Error("Can use at most one '...' column"); + } + + return new SpaceDelimitedTextPattern(columns, {}); + } + + private constructor(private readonly columns: string[], private readonly restrictions: RestrictionMap) { + // Private constructor so we validate in the .construct() factory function + } + + /** + * Restrict where the pattern applies + */ + public whereString(columnName: string, comparison: string, value: string): SpaceDelimitedTextPattern { + if (columnName === COL_ELLIPSIS) { + throw new Error("Can't use '...' in a restriction"); + } + if (this.columns.indexOf(columnName) === -1) { + throw new Error(`Column in restrictions that is not in columns: ${columnName}`); + } + + comparison = validateStringOperator(comparison); + + return new SpaceDelimitedTextPattern(this.columns, this.addRestriction(columnName, { + comparison, + stringValue: value + })); + } + + /** + * Restrict where the pattern applies + */ + public whereNumber(columnName: string, comparison: string, value: number): SpaceDelimitedTextPattern { + if (columnName === COL_ELLIPSIS) { + throw new Error("Can't use '...' 
in a restriction"); + } + if (this.columns.indexOf(columnName) === -1) { + throw new Error(`Column in restrictions that is not in columns: ${columnName}`); + } + + comparison = validateNumericalOperator(comparison); + + return new SpaceDelimitedTextPattern(this.columns, this.addRestriction(columnName, { + comparison, + numberValue: value + })); + } + + public get logPatternString(): string { + return '[' + this.columns.map(this.columnExpression.bind(this)).join(', ') + ']'; + } + + /** + * Return the column expression for the given column + */ + private columnExpression(column: string) { + const restrictions = this.restrictions[column]; + if (!restrictions) { return column; } + + return restrictions.map(r => renderRestriction(column, r)).join(' && '); + } + + /** + * Make a copy of the current restrictions and add one + */ + private addRestriction(columnName: string, restriction: ColumnRestriction) { + const ret: RestrictionMap = {}; + for (const key of Object.keys(this.restrictions)) { + ret[key] = this.restrictions[key].slice(); + } + if (!(columnName in ret)) { ret[columnName] = []; } + ret[columnName].push(restriction); + return ret; + } +} + +export interface ColumnRestriction { + /** + * Comparison operator to use + */ + comparison: string; + + /** + * String value to compare to + * + * Exactly one of 'stringValue' and 'numberValue' must be set. + */ + stringValue?: string; + + /** + * Number value to compare to + * + * Exactly one of 'stringValue' and 'numberValue' must be set. + */ + numberValue?: number; +} + +/** + * Quote a term for use in a pattern expression + * + * It's never wrong to quote a string term, and required if the term + * contains non-alphanumerical characters, so we just always do it. + * + * Inner double quotes are escaped using a backslash. 
+ */ +function quoteTerm(term: string): string { + return '"' + term.replace(/\\/g, '\\\\').replace(/"/g, '\\"') + '"'; +} + +/** + * Return whether the given column name is valid in a space-delimited table + */ +function validColumnName(column: string) { + return column === COL_ELLIPSIS || /^[a-zA-Z0-9_-]+$/.exec(column); +} + +/** + * Validate and normalize the string comparison operator + * + * Correct for a common typo/confusion, treat '==' as '=' + */ +function validateStringOperator(operator: string) { + if (operator === '==') { operator = '='; } + + if (operator !== '=' && operator !== '!=') { + throw new Error(`Invalid comparison operator ('${operator}'), must be either '=' or '!='`); + } + + return operator; +} + +const VALID_OPERATORS = ['=', '!=', '<', '<=', '>', '>=']; + +/** + * Validate and normalize numerical comparison operators + * + * Correct for a common typo/confusion, treat '==' as '=' + */ +function validateNumericalOperator(operator: string) { + // Correct for a common typo, treat '==' as '=' + if (operator === '==') { operator = '='; } + + if (VALID_OPERATORS.indexOf(operator) === -1) { + throw new Error(`Invalid comparison operator ('${operator}'), must be one of ${VALID_OPERATORS.join(', ')}`); + } + + return operator; +} + +/** + * Render a table restriction + */ +function renderRestriction(column: string, restriction: ColumnRestriction) { + if (restriction.numberValue !== undefined) { + return `${column} ${restriction.comparison} ${restriction.numberValue}`; + } else if (restriction.stringValue) { + return `${column} ${restriction.comparison} ${quoteTerm(restriction.stringValue)}`; + } else { + throw new Error('Invalid restriction'); + } +} + +function sum(xs: number[]): number { + return xs.reduce((a, c) => a + c, 0); +} \ No newline at end of file diff --git a/packages/@aws-cdk/logs/lib/subscription-filter.ts b/packages/@aws-cdk/logs/lib/subscription-filter.ts new file mode 100644 index 0000000000000..c1f63119d8a1d --- /dev/null +++ b/packages/@aws-cdk/logs/lib/subscription-filter.ts @@ -0,0 +1,79 @@ +import cdk = require('@aws-cdk/core'); +import iam = require('@aws-cdk/iam'); +import { LogGroup } from './log-group'; +import { cloudformation } from './logs.generated'; +import { IFilterPattern } from './pattern'; + +/** + * Interface for classes that can be the destination of a log Subscription + */ +export interface ILogSubscriptionDestination { + /** + * Return the properties required to send subscription events to this destination. + * + * If necessary, the destination can use the properties of the SubscriptionFilter + * object itself to configure its permissions to allow the subscription to write + * to it. + * + * The destination may reconfigure its own permissions in response to this + * function call. + */ + logSubscriptionDestination(sourceLogGroup: LogGroup): LogSubscriptionDestination; +} + +/** + * Properties returned by a Subscription destination + */ +export interface LogSubscriptionDestination { + /** + * The ARN of the subscription's destination + */ + readonly arn: cdk.Arn; + + /** + * The role to assume to write log events to the destination + * + * @default No role assumed + */ + readonly role?: iam.Role; +} + +/** + * Properties for a SubscriptionFilter + */ +export interface SubscriptionFilterProps { + /** + * The log group to create the subscription on. + */ + logGroup: LogGroup; + + /** + * The destination to send the filtered events to. + * + * For example, a Kinesis stream or a Lambda function. 
+ */ + destination: ILogSubscriptionDestination; + + /** + * Log events matching this pattern will be sent to the destination. + */ + filterPattern: IFilterPattern; +} + +/** + * A new Subscription on a CloudWatch log group. + */ +export class SubscriptionFilter extends cdk.Construct { + constructor(parent: cdk.Construct, id: string, props: SubscriptionFilterProps) { + super(parent, id); + + const destProps = props.destination.logSubscriptionDestination(props.logGroup); + + new cloudformation.SubscriptionFilterResource(this, 'Resource', { + logGroupName: props.logGroup.logGroupName, + destinationArn: destProps.arn, + roleArn: destProps.role && destProps.role.roleArn, + filterPattern: props.filterPattern.logPatternString + }); + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/logs/package.json b/packages/@aws-cdk/logs/package.json index 353b2aa34f58a..49d6916e8cd77 100644 --- a/packages/@aws-cdk/logs/package.json +++ b/packages/@aws-cdk/logs/package.json @@ -17,8 +17,8 @@ }, "scripts": { "build": "cfn2ts --scope=AWS::Logs && jsii && tslint -p . && pkglint", - "watch": "jsii -w", "lint": "jsii && tslint -p . --force", + "watch": "jsii -w", "test": "nodeunit test/test.*.js && cdk-integ-assert", "integ": "cdk-integ", "pkglint": "pkglint -f" @@ -40,6 +40,7 @@ "pkglint": "^0.7.3-beta" }, "dependencies": { - "@aws-cdk/core": "^0.7.3-beta" + "@aws-cdk/core": "^0.7.3-beta", + "@aws-cdk/iam": "^0.7.3-beta" } } diff --git a/packages/@aws-cdk/logs/test/example.retention.lit.ts b/packages/@aws-cdk/logs/test/example.retention.lit.ts new file mode 100644 index 0000000000000..d710e8bc5e906 --- /dev/null +++ b/packages/@aws-cdk/logs/test/example.retention.lit.ts @@ -0,0 +1,29 @@ +import { Stack } from '@aws-cdk/core'; +import { LogGroup } from '../lib'; + +const stack = new Stack(); + +function shortLogGroup() { + /// !show + // Configure log group for short retention + const logGroup = new LogGroup(stack, 'LogGroup', { + retentionDays: 7 + }); + /// !hide + return logGroup; +} + +function infiniteLogGroup() { + /// !show + // Configure log group for infinite retention + const logGroup = new LogGroup(stack, 'LogGroup', { + retentionDays: Infinity + }); + /// !hide + return logGroup; +} + +// + +Array.isArray(shortLogGroup); +Array.isArray(infiniteLogGroup); \ No newline at end of file diff --git a/packages/@aws-cdk/logs/test/integ.metricfilter.lit.expected.json b/packages/@aws-cdk/logs/test/integ.metricfilter.lit.expected.json new file mode 100644 index 0000000000000..d6da6b5494495 --- /dev/null +++ b/packages/@aws-cdk/logs/test/integ.metricfilter.lit.expected.json @@ -0,0 +1,26 @@ +{ + "Resources": { + "LogGroupF5B46931": { + "Type": "AWS::Logs::LogGroup", + "Properties": { + "RetentionInDays": 730 + } + }, + "MetricFilter1B93B6E5": { + "Type": "AWS::Logs::MetricFilter", + "Properties": { + "FilterPattern": "{ $.latency = \"*\" }", + "LogGroupName": { + "Ref": "LogGroupF5B46931" + }, + "MetricTransformations": [ + { + "MetricName": "Latency", + "MetricNamespace": "MyApp", + "MetricValue": "$.latency" + } + ] + } + } + } +} diff --git a/packages/@aws-cdk/logs/test/integ.metricfilter.lit.ts b/packages/@aws-cdk/logs/test/integ.metricfilter.lit.ts new file mode 100644 index 0000000000000..3c0f1227698e1 --- /dev/null +++ b/packages/@aws-cdk/logs/test/integ.metricfilter.lit.ts @@ -0,0 +1,24 @@ +import { App, Stack, StackProps } from '@aws-cdk/core'; +import { FilterPattern, LogGroup, MetricFilter } from '../lib'; + +class MetricFilterIntegStack extends Stack { + constructor(parent: App, name: 
string, props?: StackProps) { + super(parent, name, props); + + const logGroup = new LogGroup(this, 'LogGroup'); + + /// !show + new MetricFilter(this, 'MetricFilter', { + logGroup, + metricNamespace: 'MyApp', + metricName: 'Latency', + filterPattern: FilterPattern.exists('$.latency'), + metricValue: '$.latency' + }); + /// !hide + } +} + +const app = new App(process.argv); +new MetricFilterIntegStack(app, 'aws-cdk-metricfilter-integ'); +process.stdout.write(app.run()); \ No newline at end of file diff --git a/packages/@aws-cdk/logs/test/test.destination.ts b/packages/@aws-cdk/logs/test/test.destination.ts new file mode 100644 index 0000000000000..5a7dc6648efa1 --- /dev/null +++ b/packages/@aws-cdk/logs/test/test.destination.ts @@ -0,0 +1,59 @@ +import { expect, haveResource } from '@aws-cdk/assert'; +import { Arn, PolicyStatement, ServicePrincipal, Stack } from '@aws-cdk/core'; +import { Role } from '@aws-cdk/iam'; +import { Test } from 'nodeunit'; +import { CrossAccountDestination } from '../lib'; + +export = { + 'simple destination'(test: Test) { + // GIVEN + const stack = new Stack(); + const role = new Role(stack, 'Role', { + assumedBy: new ServicePrincipal('logs.us-east-2.amazonaws.com') + }); + + // WHEN + new CrossAccountDestination(stack, 'Dest', { + destinationName: 'MyDestination', + role, + targetArn: new Arn('arn:bogus') + }); + + // THEN + expect(stack).to(haveResource('AWS::Logs::Destination', { + DestinationName: 'MyDestination', + RoleArn: { "Fn::GetAtt": [ "Role1ABCC5F0", "Arn" ] }, + TargetArn: 'arn:bogus', + })); + + test.done(); + }, + + 'add policy to destination'(test: Test) { + // GIVEN + const stack = new Stack(); + const role = new Role(stack, 'Role', { + assumedBy: new ServicePrincipal('logs.us-east-2.amazonaws.com') + }); + + const dest = new CrossAccountDestination(stack, 'Dest', { + destinationName: 'MyDestination', + role, + targetArn: new Arn('arn:bogus') + }); + + // WHEN + dest.addToPolicy(new PolicyStatement() + .addAction('logs:TalkToMe')); + + // THEN + expect(stack).to(haveResource('AWS::Logs::Destination', (props: any) => { + // tslint:disable-next-line:no-console + const pol = JSON.parse(props.DestinationPolicy); + + return pol.Statement[0].action[0] === 'logs:TalkToMe'; + })); + + test.done(); + } +}; \ No newline at end of file diff --git a/packages/@aws-cdk/logs/test/test.loggroup.ts b/packages/@aws-cdk/logs/test/test.loggroup.ts new file mode 100644 index 0000000000000..ee98e563bee80 --- /dev/null +++ b/packages/@aws-cdk/logs/test/test.loggroup.ts @@ -0,0 +1,57 @@ +import { expect, haveResource, matchTemplate } from '@aws-cdk/assert'; +import { Stack } from '@aws-cdk/core'; +import { Test } from 'nodeunit'; +import { LogGroup } from '../lib'; + +export = { + 'fixed retention'(test: Test) { + // GIVEN + const stack = new Stack(); + + // WHEN + new LogGroup(stack, 'LogGroup', { + retentionDays: 7 + }); + + // THEN + expect(stack).to(haveResource('AWS::Logs::LogGroup', { + RetentionInDays: 7 + })); + + test.done(); + }, + + 'default retention'(test: Test) { + // GIVEN + const stack = new Stack(); + + // WHEN + new LogGroup(stack, 'LogGroup'); + + // THEN + expect(stack).to(haveResource('AWS::Logs::LogGroup', { + RetentionInDays: 730 + })); + + test.done(); + }, + + 'infinite retention'(test: Test) { + // GIVEN + const stack = new Stack(); + + // WHEN + new LogGroup(stack, 'LogGroup', { + retentionDays: Infinity + }); + + // THEN + expect(stack).to(matchTemplate({ + Resources: { + LogGroupF5B46931: { Type: "AWS::Logs::LogGroup" } + } + })); + + 
test.done(); + } +}; \ No newline at end of file diff --git a/packages/@aws-cdk/logs/test/test.logstream.ts b/packages/@aws-cdk/logs/test/test.logstream.ts new file mode 100644 index 0000000000000..d79920d8cc628 --- /dev/null +++ b/packages/@aws-cdk/logs/test/test.logstream.ts @@ -0,0 +1,24 @@ +import { expect, haveResource } from '@aws-cdk/assert'; +import { Stack } from '@aws-cdk/core'; +import { Test } from 'nodeunit'; +import { LogGroup, LogStream } from '../lib'; + +export = { + 'simple instantiation'(test: Test) { + // GIVEN + const stack = new Stack(); + + // WHEN + const logGroup = new LogGroup(stack, 'LogGroup'); + + new LogStream(stack, 'Stream', { + logGroup + }); + + // THEN + expect(stack).to(haveResource('AWS::Logs::LogStream', { + })); + + test.done(); + }, +}; diff --git a/packages/@aws-cdk/logs/test/test.metricfilter.ts b/packages/@aws-cdk/logs/test/test.metricfilter.ts new file mode 100644 index 0000000000000..09e9d2ebb11be --- /dev/null +++ b/packages/@aws-cdk/logs/test/test.metricfilter.ts @@ -0,0 +1,34 @@ +import { expect, haveResource } from '@aws-cdk/assert'; +import { Stack } from '@aws-cdk/core'; +import { Test } from 'nodeunit'; +import { FilterPattern, LogGroup, MetricFilter } from '../lib'; + +export = { + 'trivial instantiation'(test: Test) { + // GIVEN + const stack = new Stack(); + const logGroup = new LogGroup(stack, 'LogGroup'); + + // WHEN + new MetricFilter(stack, 'Subscription', { + logGroup, + metricNamespace: 'AWS/Test', + metricName: 'Latency', + metricValue: '$.latency', + filterPattern: FilterPattern.exists('$.latency') + }); + + // THEN + expect(stack).to(haveResource('AWS::Logs::MetricFilter', { + MetricTransformations: [{ + MetricNamespace: 'AWS/Test', + MetricName: 'Latency', + MetricValue: '$.latency', + }], + FilterPattern: '{ $.latency = "*" }', + LogGroupName: { Ref: "LogGroupF5B46931" } + })); + + test.done(); + }, +}; diff --git a/packages/@aws-cdk/logs/test/test.pattern.ts b/packages/@aws-cdk/logs/test/test.pattern.ts new file mode 100644 index 0000000000000..04efe52c2197c --- /dev/null +++ b/packages/@aws-cdk/logs/test/test.pattern.ts @@ -0,0 +1,152 @@ +import { Test } from 'nodeunit'; +import { FilterPattern } from '../lib'; + +export = { + 'text patterns': { + 'simple text pattern'(test: Test) { + const pattern = FilterPattern.allTerms('foo', 'bar', 'baz'); + + test.equal('"foo" "bar" "baz"', pattern.logPatternString); + + test.done(); + }, + + 'quoted terms'(test: Test) { + const pattern = FilterPattern.allTerms('"foo" he said'); + + test.equal('"\\"foo\\" he said"', pattern.logPatternString); + + test.done(); + }, + + 'disjunction of conjunctions'(test: Test) { + const pattern = FilterPattern.anyTermGroup( + ["foo", "bar"], + ["baz"] + ); + + test.equal('?"foo" "bar" ?"baz"', pattern.logPatternString); + + test.done(); + }, + + 'dont prefix with ? 
if only one disjunction'(test: Test) { + const pattern = FilterPattern.anyTermGroup( + ["foo", "bar"] + ); + + test.equal('"foo" "bar"', pattern.logPatternString); + + test.done(); + }, + + 'empty log pattern is empty string'(test: Test) { + const pattern = FilterPattern.anyTermGroup(); + + test.equal('', pattern.logPatternString); + + test.done(); + } + }, + + 'json patterns': { + 'string value'(test: Test) { + const pattern = FilterPattern.stringValue('$.field', '=', 'value'); + + test.equal('{ $.field = "value" }', pattern.logPatternString); + + test.done(); + }, + + 'also recognize =='(test: Test) { + const pattern = FilterPattern.stringValue('$.field', '==', 'value'); + + test.equal('{ $.field = "value" }', pattern.logPatternString); + + test.done(); + }, + + 'number patterns'(test: Test) { + const pattern = FilterPattern.numberValue('$.field', '<=', 300); + + test.equal('{ $.field <= 300 }', pattern.logPatternString); + + test.done(); + }, + + 'combining with AND or OR'(test: Test) { + const p1 = FilterPattern.numberValue('$.field', '<=', 300); + const p2 = FilterPattern.stringValue('$.field', '=', 'value'); + + const andPattern = FilterPattern.all(p1, p2); + test.equal('{ ($.field <= 300) && ($.field = "value") }', andPattern.logPatternString); + + const orPattern = FilterPattern.any(p1, p2); + test.equal('{ ($.field <= 300) || ($.field = "value") }', orPattern.logPatternString); + + test.done(); + }, + + 'single AND is not wrapped with parens'(test: Test) { + const p1 = FilterPattern.stringValue('$.field', '=', 'value'); + + const pattern = FilterPattern.all(p1); + + test.equal('{ $.field = "value" }', pattern.logPatternString); + + test.done(); + }, + + 'empty AND is rejected'(test: Test) { + test.throws(() => { + FilterPattern.all(); + }); + + test.done(); + }, + + 'invalid string operators are rejected'(test: Test) { + test.throws(() => { + FilterPattern.stringValue('$.field', '<=', 'hello'); + }); + + test.done(); + }, + + 'can test boolean value'(test: Test) { + const pattern = FilterPattern.booleanValue('$.field', false); + + test.equal('{ $.field IS FALSE }', pattern.logPatternString); + + test.done(); + }, + }, + + 'table patterns': { + 'simple model'(test: Test) { + const pattern = FilterPattern.spaceDelimited('...', 'status_code', 'bytes'); + + test.equal('[..., status_code, bytes]', pattern.logPatternString); + + test.done(); + }, + + 'add restrictions'(test: Test) { + const pattern = FilterPattern.spaceDelimited('...', 'status_code', 'bytes') + .whereString('status_code', '=', '4*') + .whereNumber('status_code', '!=', 403); + + test.equal('[..., status_code = "4*" && status_code != 403, bytes]', pattern.logPatternString); + + test.done(); + }, + + 'cant use more than one ellipsis'(test: Test) { + test.throws(() => { + FilterPattern.spaceDelimited('...', 'status_code', '...'); + }); + + test.done(); + } + } +}; \ No newline at end of file diff --git a/packages/@aws-cdk/logs/test/test.subscriptionfilter.ts b/packages/@aws-cdk/logs/test/test.subscriptionfilter.ts new file mode 100644 index 0000000000000..9ad32a90f0439 --- /dev/null +++ b/packages/@aws-cdk/logs/test/test.subscriptionfilter.ts @@ -0,0 +1,36 @@ +import { expect, haveResource } from '@aws-cdk/assert'; +import { Arn, Stack } from '@aws-cdk/core'; +import { Test } from 'nodeunit'; +import { FilterPattern, ILogSubscriptionDestination, LogGroup, SubscriptionFilter } from '../lib'; + +export = { + 'trivial instantiation'(test: Test) { + // GIVEN + const stack = new Stack(); + const logGroup = new LogGroup(stack, 
'LogGroup'); + + // WHEN + new SubscriptionFilter(stack, 'Subscription', { + logGroup, + destination: new FakeDestination(), + filterPattern: FilterPattern.literal("some pattern") + }); + + // THEN + expect(stack).to(haveResource('AWS::Logs::SubscriptionFilter', { + DestinationArn: "arn:bogus", + FilterPattern: "some pattern", + LogGroupName: { Ref: "LogGroupF5B46931" } + })); + + test.done(); + }, +}; + +class FakeDestination implements ILogSubscriptionDestination { + public logSubscriptionDestination(_sourceLogGroup: LogGroup) { + return { + arn: new Arn('arn:bogus'), + }; + } +} \ No newline at end of file
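
For reference, a minimal consumer-side sketch of a concrete destination. The class name,
constructor arguments and usage below are made up for illustration; only the
ILogSubscriptionDestination contract and its { arn, role? } return shape come from this change.

    import cdk = require('@aws-cdk/core');
    import iam = require('@aws-cdk/iam');
    import { ILogSubscriptionDestination, LogGroup } from '@aws-cdk/logs';

    // Hypothetical destination wrapping a pre-existing target ARN and delivery role.
    class CustomDestination implements ILogSubscriptionDestination {
      constructor(private readonly targetArn: cdk.Arn, private readonly deliveryRole: iam.Role) {
      }

      public logSubscriptionDestination(_sourceLogGroup: LogGroup) {
        // SubscriptionFilter renders these as DestinationArn and (optionally) RoleArn.
        return { arn: this.targetArn, role: this.deliveryRole };
      }
    }

    // Which could then be wired up as:
    //
    //   new SubscriptionFilter(stack, 'Subscription', {
    //     logGroup,
    //     destination: new CustomDestination(targetArn, deliveryRole),
    //     filterPattern: FilterPattern.allTerms('ERROR')
    //   });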