diff --git a/CHANGELOG.md b/CHANGELOG.md index 51153cb786a..3cfd79df181 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,19 @@ ## 2.14.0 (Unreleased) + +ENHANCEMENTS: + +* data-source/aws_msk_cluster: Add `bootstrap_brokers_tls` attribute +* resource/aws_msk_cluster: Add `client_authentication`, `configuration_info`, and `encryption_in_transit` configuration blocks [GH-8850] +* resource/aws_msk_cluster: Add `bootstrap_brokers_tls` and `current_version` attributes [GH-8850] +* resource/aws_msk_cluster: Support `broker_node_group_info` configuration block `ebs_volume_size` argument updates [GH-8850] +* resource/aws_msk_cluster: Support tagging on creation [GH-8850] +* resource/aws_subnet: Use customizable timeouts for pending creation and waiting for `DependencyViolation` errors on deletion [GH-6322] + +BUG FIXES: + +* resource/aws_subnet: Bump default timeout for deletion from 10 to 20 minutes to better handle ELBv2 ENI deletions [GH-6322] +* resource/aws_launch_template: Add a nil check for `spot_options` to avoid panicking if options are empty [GH-8844] + ## 2.13.0 (May 31, 2019) FEATURES: diff --git a/GNUmakefile b/GNUmakefile index 186e0197175..e029fe4b99e 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -33,8 +33,10 @@ websitefmtcheck: lint: @echo "==> Checking source code against linters..." 
@GOGC=30 golangci-lint run ./$(PKG_NAME) + @tfproviderlint -c 1 -S001 -S002 -S003 -S004 -S005 ./$(PKG_NAME) tools: + GO111MODULE=on go install github.com/bflad/tfproviderlint/cmd/tfproviderlint GO111MODULE=on go install github.com/client9/misspell/cmd/misspell GO111MODULE=on go install github.com/golangci/golangci-lint/cmd/golangci-lint diff --git a/aws/data_source_aws_batch_compute_environment_test.go b/aws/data_source_aws_batch_compute_environment_test.go index ef84f1252e0..515f7698d6c 100644 --- a/aws/data_source_aws_batch_compute_environment_test.go +++ b/aws/data_source_aws_batch_compute_environment_test.go @@ -15,7 +15,7 @@ func TestAccDataSourceAwsBatchComputeEnvironment(t *testing.T) { datasourceName := "data.aws_batch_compute_environment.by_name" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { diff --git a/aws/data_source_aws_batch_job_queue_test.go b/aws/data_source_aws_batch_job_queue_test.go index 420f940c0bd..30b0d94e7fb 100644 --- a/aws/data_source_aws_batch_job_queue_test.go +++ b/aws/data_source_aws_batch_job_queue_test.go @@ -15,7 +15,7 @@ func TestAccDataSourceAwsBatchJobQueue(t *testing.T) { datasourceName := "data.aws_batch_job_queue.by_name" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { diff --git a/aws/data_source_aws_cognito_user_pools_test.go b/aws/data_source_aws_cognito_user_pools_test.go index 78a194ecd97..595b9734103 100644 --- a/aws/data_source_aws_cognito_user_pools_test.go +++ b/aws/data_source_aws_cognito_user_pools_test.go @@ -12,7 +12,7 @@ import ( func TestAccDataSourceAwsCognitoUserPools_basic(t *testing.T) { rName := fmt.Sprintf("tf_acc_ds_cognito_user_pools_%s", 
acctest.RandString(7)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { diff --git a/aws/data_source_aws_eks_cluster_test.go b/aws/data_source_aws_eks_cluster_test.go index 4da5e530646..29bb780af1d 100644 --- a/aws/data_source_aws_eks_cluster_test.go +++ b/aws/data_source_aws_eks_cluster_test.go @@ -15,7 +15,7 @@ func TestAccAWSEksClusterDataSource_basic(t *testing.T) { resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSEks(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEksClusterDestroy, Steps: []resource.TestStep{ diff --git a/aws/data_source_aws_msk_cluster.go b/aws/data_source_aws_msk_cluster.go index 1c97298f9a7..690e1753f39 100644 --- a/aws/data_source_aws_msk_cluster.go +++ b/aws/data_source_aws_msk_cluster.go @@ -22,6 +22,10 @@ func dataSourceAwsMskCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "bootstrap_brokers_tls": { + Type: schema.TypeString, + Computed: true, + }, "cluster_name": { Type: schema.TypeString, Required: true, @@ -92,23 +96,14 @@ func dataSourceAwsMskClusterRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("error reading MSK Cluster (%s) bootstrap brokers: %s", aws.StringValue(cluster.ClusterArn), err) } - listTagsInput := &kafka.ListTagsForResourceInput{ - ResourceArn: cluster.ClusterArn, - } - - listTagsOutput, err := conn.ListTagsForResource(listTagsInput) - - if err != nil { - return fmt.Errorf("error reading MSK Cluster (%s) tags: %s", aws.StringValue(cluster.ClusterArn), err) - } - d.Set("arn", aws.StringValue(cluster.ClusterArn)) d.Set("bootstrap_brokers", aws.StringValue(bootstrapBrokersoOutput.BootstrapBrokerString)) + d.Set("bootstrap_brokers_tls", 
aws.StringValue(bootstrapBrokersoOutput.BootstrapBrokerStringTls)) d.Set("cluster_name", aws.StringValue(cluster.ClusterName)) d.Set("kafka_version", aws.StringValue(cluster.CurrentBrokerSoftwareInfo.KafkaVersion)) d.Set("number_of_broker_nodes", aws.Int64Value(cluster.NumberOfBrokerNodes)) - if err := d.Set("tags", tagsToMapMskCluster(listTagsOutput.Tags)); err != nil { + if err := d.Set("tags", tagsToMapMskCluster(cluster.Tags)); err != nil { return fmt.Errorf("error setting tags: %s", err) } diff --git a/aws/data_source_aws_msk_cluster_test.go b/aws/data_source_aws_msk_cluster_test.go index 93055baf4ee..d4ceec8d860 100644 --- a/aws/data_source_aws_msk_cluster_test.go +++ b/aws/data_source_aws_msk_cluster_test.go @@ -23,6 +23,7 @@ func TestAccAWSMskClusterDataSource_Name(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "arn", dataSourceName, "arn"), resource.TestCheckResourceAttrSet(dataSourceName, "bootstrap_brokers"), + resource.TestCheckResourceAttrSet(dataSourceName, "bootstrap_brokers_tls"), resource.TestCheckResourceAttrPair(resourceName, "cluster_name", dataSourceName, "cluster_name"), resource.TestCheckResourceAttrPair(resourceName, "kafka_version", dataSourceName, "kafka_version"), resource.TestCheckResourceAttrPair(resourceName, "number_of_broker_nodes", dataSourceName, "number_of_broker_nodes"), diff --git a/aws/data_source_aws_secretsmanager_secret_test.go b/aws/data_source_aws_secretsmanager_secret_test.go index 7f0b9bfac71..31b9318a10b 100644 --- a/aws/data_source_aws_secretsmanager_secret_test.go +++ b/aws/data_source_aws_secretsmanager_secret_test.go @@ -12,7 +12,7 @@ import ( func TestAccDataSourceAwsSecretsManagerSecret_Basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSecretsManager(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { @@ -37,7 +37,7 @@ func 
TestAccDataSourceAwsSecretsManagerSecret_ARN(t *testing.T) { datasourceName := "data.aws_secretsmanager_secret.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSecretsManager(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { @@ -56,7 +56,7 @@ func TestAccDataSourceAwsSecretsManagerSecret_Name(t *testing.T) { datasourceName := "data.aws_secretsmanager_secret.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSecretsManager(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { @@ -75,7 +75,7 @@ func TestAccDataSourceAwsSecretsManagerSecret_Policy(t *testing.T) { datasourceName := "data.aws_secretsmanager_secret.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSecretsManager(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { diff --git a/aws/data_source_aws_secretsmanager_secret_version_test.go b/aws/data_source_aws_secretsmanager_secret_version_test.go index 85e7651983c..e3dadbd8d2c 100644 --- a/aws/data_source_aws_secretsmanager_secret_version_test.go +++ b/aws/data_source_aws_secretsmanager_secret_version_test.go @@ -16,7 +16,7 @@ func TestAccDataSourceAwsSecretsManagerSecretVersion_Basic(t *testing.T) { datasourceName := "data.aws_secretsmanager_secret_version.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSecretsManager(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { @@ -39,7 +39,7 @@ func TestAccDataSourceAwsSecretsManagerSecretVersion_VersionID(t *testing.T) { datasourceName := "data.aws_secretsmanager_secret_version.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSecretsManager(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { @@ -58,7 +58,7 @@ func TestAccDataSourceAwsSecretsManagerSecretVersion_VersionStage(t *testing.T) datasourceName := "data.aws_secretsmanager_secret_version.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSSecretsManager(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { diff --git a/aws/data_source_aws_transfer_server_test.go b/aws/data_source_aws_transfer_server_test.go index bdd54564f97..3f6b91bb6dc 100644 --- a/aws/data_source_aws_transfer_server_test.go +++ b/aws/data_source_aws_transfer_server_test.go @@ -13,7 +13,7 @@ func TestAccDataSourceAwsTransferServer_basic(t *testing.T) { datasourceName := "data.aws_transfer_server.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSTransfer(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { @@ -35,7 +35,7 @@ func TestAccDataSourceAwsTransferServer_service_managed(t *testing.T) { datasourceName := "data.aws_transfer_server.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSTransfer(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { @@ -57,7 +57,7 @@ func TestAccDataSourceAwsTransferServer_apigateway(t *testing.T) { datasourceName := "data.aws_transfer_server.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSTransfer(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { diff --git a/aws/resource_aws_acmpca_certificate_authority.go b/aws/resource_aws_acmpca_certificate_authority.go index e6c30d88381..46bb1cedbb7 
100644 --- a/aws/resource_aws_acmpca_certificate_authority.go +++ b/aws/resource_aws_acmpca_certificate_authority.go @@ -288,6 +288,9 @@ func resourceAwsAcmpcaCertificateAuthorityCreate(d *schema.ResourceData, meta in } return nil }) + if isResourceTimeoutError(err) { + output, err = conn.CreateCertificateAuthority(input) + } if err != nil { return fmt.Errorf("error creating ACMPCA Certificate Authority: %s", err) } diff --git a/aws/resource_aws_batch_compute_environment_test.go b/aws/resource_aws_batch_compute_environment_test.go index 0bad9a59bfb..69f4db7b8c2 100644 --- a/aws/resource_aws_batch_compute_environment_test.go +++ b/aws/resource_aws_batch_compute_environment_test.go @@ -67,7 +67,7 @@ func TestAccAWSBatchComputeEnvironment_createEc2(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchComputeEnvironmentDestroy, Steps: []resource.TestStep{ @@ -85,7 +85,7 @@ func TestAccAWSBatchComputeEnvironment_createEc2WithTags(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchComputeEnvironmentDestroy, Steps: []resource.TestStep{ @@ -105,7 +105,7 @@ func TestAccAWSBatchComputeEnvironment_createSpot(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchComputeEnvironmentDestroy, Steps: []resource.TestStep{ @@ -123,7 +123,7 @@ func TestAccAWSBatchComputeEnvironment_createUnmanaged(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchComputeEnvironmentDestroy, Steps: []resource.TestStep{ @@ -141,7 +141,7 @@ func TestAccAWSBatchComputeEnvironment_updateMaxvCpus(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchComputeEnvironmentDestroy, Steps: []resource.TestStep{ @@ -167,7 +167,7 @@ func TestAccAWSBatchComputeEnvironment_updateInstanceType(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchComputeEnvironmentDestroy, Steps: []resource.TestStep{ @@ -195,7 +195,7 @@ func TestAccAWSBatchComputeEnvironment_updateComputeEnvironmentName(t *testing.T expectedUpdatedName := fmt.Sprintf("tf_acc_test_updated_%d", rInt) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchComputeEnvironmentDestroy, Steps: []resource.TestStep{ @@ -221,7 +221,7 @@ func TestAccAWSBatchComputeEnvironment_createEc2WithoutComputeResources(t *testi rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchComputeEnvironmentDestroy, Steps: []resource.TestStep{ @@ -237,7 +237,7 @@ func TestAccAWSBatchComputeEnvironment_createUnmanagedWithComputeResources(t *te rInt := acctest.RandInt() 
resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchComputeEnvironmentDestroy, Steps: []resource.TestStep{ @@ -256,7 +256,7 @@ func TestAccAWSBatchComputeEnvironment_launchTemplate(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchComputeEnvironmentDestroy, Steps: []resource.TestStep{ @@ -280,7 +280,7 @@ func TestAccAWSBatchComputeEnvironment_createSpotWithoutBidPercentage(t *testing rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchComputeEnvironmentDestroy, Steps: []resource.TestStep{ @@ -296,7 +296,7 @@ func TestAccAWSBatchComputeEnvironment_updateState(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchComputeEnvironmentDestroy, Steps: []resource.TestStep{ @@ -373,6 +373,22 @@ func testAccCheckAwsBatchComputeEnvironmentExists() resource.TestCheckFunc { } } +func testAccPreCheckAWSBatch(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).batchconn + + input := &batch.DescribeComputeEnvironmentsInput{} + + _, err := conn.DescribeComputeEnvironments(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccAWSBatchComputeEnvironmentConfigBase(rInt int) string { 
return fmt.Sprintf(` ########## ecs_instance_role ########## diff --git a/aws/resource_aws_batch_job_definition_test.go b/aws/resource_aws_batch_job_definition_test.go index 09708ad19be..0c39aa6cde5 100644 --- a/aws/resource_aws_batch_job_definition_test.go +++ b/aws/resource_aws_batch_job_definition_test.go @@ -52,7 +52,7 @@ func TestAccAWSBatchJobDefinition_basic(t *testing.T) { ri := acctest.RandInt() config := fmt.Sprintf(testAccBatchJobDefinitionBaseConfig, ri) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchJobDefinitionDestroy, Steps: []resource.TestStep{ @@ -74,7 +74,7 @@ func TestAccAWSBatchJobDefinition_updateForcesNewResource(t *testing.T) { config := fmt.Sprintf(testAccBatchJobDefinitionBaseConfig, ri) updateConfig := fmt.Sprintf(testAccBatchJobDefinitionUpdateConfig, ri) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchJobDefinitionDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_batch_job_queue_test.go b/aws/resource_aws_batch_job_queue_test.go index 942c0fa19c1..a1236af13e7 100644 --- a/aws/resource_aws_batch_job_queue_test.go +++ b/aws/resource_aws_batch_job_queue_test.go @@ -76,7 +76,7 @@ func TestAccAWSBatchJobQueue_basic(t *testing.T) { ri := acctest.RandInt() config := fmt.Sprintf(testAccBatchJobQueueBasic, ri) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchJobQueueDestroy, Steps: []resource.TestStep{ @@ -97,7 +97,7 @@ func TestAccAWSBatchJobQueue_disappears(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLaunchTemplateDestroy, Steps: []resource.TestStep{ @@ -119,7 +119,7 @@ func TestAccAWSBatchJobQueue_update(t *testing.T) { config := fmt.Sprintf(testAccBatchJobQueueBasic, ri) updateConfig := fmt.Sprintf(testAccBatchJobQueueUpdate, ri) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBatch(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckBatchJobQueueDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_budgets_budget_test.go b/aws/resource_aws_budgets_budget_test.go index ab556bfc4b3..d10dda59b26 100644 --- a/aws/resource_aws_budgets_budget_test.go +++ b/aws/resource_aws_budgets_budget_test.go @@ -24,7 +24,7 @@ func TestAccAWSBudgetsBudget_basic(t *testing.T) { configBasicUpdate := testAccAWSBudgetsBudgetConfigUpdate(name) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBudgets(t) }, Providers: testAccProviders, CheckDestroy: testAccAWSBudgetsBudgetDestroy, Steps: []resource.TestStep{ @@ -76,7 +76,7 @@ func TestAccAWSBudgetsBudget_prefix(t *testing.T) { configBasicUpdate := testAccAWSBudgetsBudgetConfigUpdate(name) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBudgets(t) }, Providers: testAccProviders, CheckDestroy: testAccAWSBudgetsBudgetDestroy, Steps: []resource.TestStep{ @@ -138,7 +138,7 @@ func TestAccAWSBudgetsBudget_notification(t *testing.T) { oneTopic := []string{"${aws_sns_topic.budget_notifications.arn}"} resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); 
testAccPreCheckAWSBudgets(t) }, Providers: testAccProviders, CheckDestroy: testAccAWSBudgetsBudgetDestroy, Steps: []resource.TestStep{ @@ -336,6 +336,24 @@ func testAccAWSBudgetsBudgetDestroy(s *terraform.State) error { return nil } +func testAccPreCheckAWSBudgets(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).budgetconn + + input := &budgets.DescribeBudgetsInput{ + AccountId: aws.String(testAccProvider.Meta().(*AWSClient).accountid), + } + + _, err := conn.DescribeBudgets(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccAWSBudgetsBudgetConfigUpdate(name string) budgets.Budget { dateNow := time.Now().UTC() futureDate := dateNow.AddDate(0, 0, 14) diff --git a/aws/resource_aws_cloud9_environment_ec2_test.go b/aws/resource_aws_cloud9_environment_ec2_test.go index 331e80952c9..1b25d832d48 100644 --- a/aws/resource_aws_cloud9_environment_ec2_test.go +++ b/aws/resource_aws_cloud9_environment_ec2_test.go @@ -22,7 +22,7 @@ func TestAccAWSCloud9EnvironmentEc2_basic(t *testing.T) { resourceName := "aws_cloud9_environment_ec2.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloud9(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCloud9EnvironmentEc2Destroy, Steps: []resource.TestStep{ @@ -63,7 +63,7 @@ func TestAccAWSCloud9EnvironmentEc2_allFields(t *testing.T) { resourceName := "aws_cloud9_environment_ec2.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloud9(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCloud9EnvironmentEc2Destroy, Steps: []resource.TestStep{ @@ -100,7 +100,7 @@ func TestAccAWSCloud9EnvironmentEc2_importBasic(t *testing.T) { resourceName := 
"aws_cloud9_environment_ec2.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloud9(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCloud9EnvironmentEc2Destroy, Steps: []resource.TestStep{ @@ -180,6 +180,22 @@ func testAccCheckAWSCloud9EnvironmentEc2Destroy(s *terraform.State) error { return nil } +func testAccPreCheckAWSCloud9(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).cloud9conn + + input := &cloud9.ListEnvironmentsInput{} + + _, err := conn.ListEnvironments(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccAWSCloud9EnvironmentEc2Config(name string) string { return fmt.Sprintf(` resource "aws_cloud9_environment_ec2" "test" { diff --git a/aws/resource_aws_cloudfront_distribution_test.go b/aws/resource_aws_cloudfront_distribution_test.go index bb6d8acce8c..0bb6f2f3f6d 100644 --- a/aws/resource_aws_cloudfront_distribution_test.go +++ b/aws/resource_aws_cloudfront_distribution_test.go @@ -82,7 +82,7 @@ func TestAccAWSCloudFrontDistribution_disappears(t *testing.T) { resourceName := "aws_cloudfront_distribution.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -108,7 +108,7 @@ func TestAccAWSCloudFrontDistribution_S3Origin(t *testing.T) { ri := acctest.RandInt() testConfig := fmt.Sprintf(testAccAWSCloudFrontDistributionS3Config, ri, originBucket, logBucket, testAccAWSCloudFrontDistributionRetainConfig()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, 
Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -143,7 +143,7 @@ func TestAccAWSCloudFrontDistribution_S3OriginWithTags(t *testing.T) { postConfig := fmt.Sprintf(testAccAWSCloudFrontDistributionS3ConfigWithTagsUpdated, ri, originBucket, logBucket, testAccAWSCloudFrontDistributionRetainConfig()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -190,7 +190,7 @@ func TestAccAWSCloudFrontDistribution_S3OriginWithTags(t *testing.T) { func TestAccAWSCloudFrontDistribution_customOrigin(t *testing.T) { var distribution cloudfront.Distribution resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -222,7 +222,7 @@ func TestAccAWSCloudFrontDistribution_multiOrigin(t *testing.T) { var distribution cloudfront.Distribution resourceName := "aws_cloudfront_distribution.multi_origin_distribution" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -258,7 +258,7 @@ func TestAccAWSCloudFrontDistribution_orderedCacheBehavior(t *testing.T) { var distribution cloudfront.Distribution resourceName := "aws_cloudfront_distribution.main" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: 
testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -288,7 +288,7 @@ func TestAccAWSCloudFrontDistribution_orderedCacheBehavior(t *testing.T) { func TestAccAWSCloudFrontDistribution_Origin_EmptyDomainName(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -302,7 +302,7 @@ func TestAccAWSCloudFrontDistribution_Origin_EmptyDomainName(t *testing.T) { func TestAccAWSCloudFrontDistribution_Origin_EmptyOriginID(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -323,7 +323,7 @@ func TestAccAWSCloudFrontDistribution_noOptionalItemsConfig(t *testing.T) { var distribution cloudfront.Distribution resourceName := "aws_cloudfront_distribution.no_optional_items" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -403,7 +403,7 @@ func TestAccAWSCloudFrontDistribution_noOptionalItemsConfig(t *testing.T) { func TestAccAWSCloudFrontDistribution_HTTP11Config(t *testing.T) { var distribution cloudfront.Distribution resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -429,7 +429,7 @@ func 
TestAccAWSCloudFrontDistribution_HTTP11Config(t *testing.T) { func TestAccAWSCloudFrontDistribution_IsIPV6EnabledConfig(t *testing.T) { var distribution cloudfront.Distribution resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -457,7 +457,7 @@ func TestAccAWSCloudFrontDistribution_IsIPV6EnabledConfig(t *testing.T) { func TestAccAWSCloudFrontDistribution_noCustomErrorResponseConfig(t *testing.T) { var distribution cloudfront.Distribution resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -486,7 +486,7 @@ func TestAccAWSCloudFrontDistribution_DefaultCacheBehavior_ForwardedValues_Cooki retainOnDelete := testAccAWSCloudFrontDistributionRetainOnDeleteFromEnv() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -529,7 +529,7 @@ func TestAccAWSCloudFrontDistribution_DefaultCacheBehavior_ForwardedValues_Heade retainOnDelete := testAccAWSCloudFrontDistributionRetainOnDeleteFromEnv() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -569,7 +569,7 @@ func TestAccAWSCloudFrontDistribution_Enabled(t *testing.T) { resourceName := "aws_cloudfront_distribution.test" 
resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -610,7 +610,7 @@ func TestAccAWSCloudFrontDistribution_RetainOnDelete(t *testing.T) { resourceName := "aws_cloudfront_distribution.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -640,7 +640,7 @@ func TestAccAWSCloudFrontDistribution_OrderedCacheBehavior_ForwardedValues_Cooki retainOnDelete := testAccAWSCloudFrontDistributionRetainOnDeleteFromEnv() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -683,7 +683,7 @@ func TestAccAWSCloudFrontDistribution_OrderedCacheBehavior_ForwardedValues_Heade retainOnDelete := testAccAWSCloudFrontDistributionRetainOnDeleteFromEnv() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -725,7 +725,7 @@ func TestAccAWSCloudFrontDistribution_ViewerCertificate_AcmCertificateArn(t *tes retainOnDelete := testAccAWSCloudFrontDistributionRetainOnDeleteFromEnv() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, ProviderFactories: testAccProviderFactories(&providers), 
CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -757,7 +757,7 @@ func TestAccAWSCloudFrontDistribution_ViewerCertificate_AcmCertificateArn_Confli retainOnDelete := testAccAWSCloudFrontDistributionRetainOnDeleteFromEnv() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, ProviderFactories: testAccProviderFactories(&providers), CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -786,7 +786,7 @@ func TestAccAWSCloudFrontDistribution_WaitForDeployment(t *testing.T) { resourceName := "aws_cloudfront_distribution.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ @@ -1011,6 +1011,22 @@ func testAccCheckCloudFrontDistributionWaitForDeployment(distribution *cloudfron } } +func testAccPreCheckAWSCloudFront(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).cloudfrontconn + + input := &cloudfront.ListDistributionsInput{} + + _, err := conn.ListDistributions(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccAWSCloudFrontDistributionRetainOnDeleteFromEnv() bool { _, ok := os.LookupEnv("TF_TEST_CLOUDFRONT_RETAIN") return ok @@ -1029,7 +1045,7 @@ func TestAccAWSCloudFrontDistribution_OriginGroups(t *testing.T) { ri := acctest.RandInt() testConfig := fmt.Sprintf(testAccAWSCloudFrontDistributionOriginGroupsConfig, ri, originBucket, backupBucket, testAccAWSCloudFrontDistributionRetainConfig()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); 
testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontDistributionDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_cloudfront_origin_access_identity_test.go b/aws/resource_aws_cloudfront_origin_access_identity_test.go index 547f61a1877..350c50848d5 100644 --- a/aws/resource_aws_cloudfront_origin_access_identity_test.go +++ b/aws/resource_aws_cloudfront_origin_access_identity_test.go @@ -15,7 +15,7 @@ func TestAccAWSCloudFrontOriginAccessIdentity_importBasic(t *testing.T) { resourceName := "aws_cloudfront_origin_access_identity.origin_access_identity" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontOriginAccessIdentityDestroy, Steps: []resource.TestStep{ @@ -34,7 +34,7 @@ func TestAccAWSCloudFrontOriginAccessIdentity_importBasic(t *testing.T) { func TestAccAWSCloudFrontOriginAccessIdentity_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontOriginAccessIdentityDestroy, Steps: []resource.TestStep{ @@ -63,7 +63,7 @@ func TestAccAWSCloudFrontOriginAccessIdentity_basic(t *testing.T) { func TestAccAWSCloudFrontOriginAccessIdentity_noComment(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontOriginAccessIdentityDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_cloudfront_public_key_test.go b/aws/resource_aws_cloudfront_public_key_test.go index e77a13feaa1..04c66f6f851 100644 --- a/aws/resource_aws_cloudfront_public_key_test.go +++ 
b/aws/resource_aws_cloudfront_public_key_test.go @@ -16,7 +16,7 @@ func TestAccAWSCloudFrontPublicKey_basic(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontPublicKeyDestroy, Steps: []resource.TestStep{ @@ -39,7 +39,7 @@ func TestAccAWSCloudFrontPublicKey_namePrefix(t *testing.T) { startsWithPrefix := regexp.MustCompile("^tf-acc-test-") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontPublicKeyDestroy, Steps: []resource.TestStep{ @@ -58,7 +58,7 @@ func TestAccAWSCloudFrontPublicKey_update(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloudFront(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckCloudFrontPublicKeyDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_cognito_identity_pool_roles_attachment_test.go b/aws/resource_aws_cognito_identity_pool_roles_attachment_test.go index 6d8ea151716..ac4d64014a0 100644 --- a/aws/resource_aws_cognito_identity_pool_roles_attachment_test.go +++ b/aws/resource_aws_cognito_identity_pool_roles_attachment_test.go @@ -19,7 +19,7 @@ func TestAccAWSCognitoIdentityPoolRolesAttachment_basic(t *testing.T) { updatedName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoIdentityPoolRolesAttachmentDestroy, Steps: []resource.TestStep{ @@ 
-47,7 +47,7 @@ func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappings(t *testing.T) { name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoIdentityPoolRolesAttachmentDestroy, Steps: []resource.TestStep{ @@ -95,7 +95,7 @@ func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappingsWithAmbiguousRoleR name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoIdentityPoolRolesAttachmentDestroy, Steps: []resource.TestStep{ @@ -111,7 +111,7 @@ func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappingsWithRulesTypeError name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoIdentityPoolRolesAttachmentDestroy, Steps: []resource.TestStep{ @@ -127,7 +127,7 @@ func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappingsWithTokenTypeError name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoIdentityPoolRolesAttachmentDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_cognito_identity_pool_test.go b/aws/resource_aws_cognito_identity_pool_test.go index 290f76e6104..b5bdd88c7c2 100644 
--- a/aws/resource_aws_cognito_identity_pool_test.go +++ b/aws/resource_aws_cognito_identity_pool_test.go @@ -19,7 +19,7 @@ func TestAccAWSCognitoIdentityPool_importBasic(t *testing.T) { rName := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSAPIGatewayAccountDestroy, Steps: []resource.TestStep{ @@ -41,7 +41,7 @@ func TestAccAWSCognitoIdentityPool_basic(t *testing.T) { updatedName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoIdentityPoolDestroy, Steps: []resource.TestStep{ @@ -70,7 +70,7 @@ func TestAccAWSCognitoIdentityPool_supportedLoginProviders(t *testing.T) { name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoIdentityPoolDestroy, Steps: []resource.TestStep{ @@ -106,7 +106,7 @@ func TestAccAWSCognitoIdentityPool_openidConnectProviderArns(t *testing.T) { name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoIdentityPoolDestroy, Steps: []resource.TestStep{ @@ -141,7 +141,7 @@ func TestAccAWSCognitoIdentityPool_samlProviderArns(t *testing.T) { name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.ParallelTest(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoIdentityPoolDestroy, Steps: []resource.TestStep{ @@ -177,7 +177,7 @@ func TestAccAWSCognitoIdentityPool_cognitoIdentityProviders(t *testing.T) { name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoIdentityPoolDestroy, Steps: []resource.TestStep{ @@ -259,6 +259,24 @@ func testAccCheckAWSCognitoIdentityPoolDestroy(s *terraform.State) error { return nil } +func testAccPreCheckAWSCognitoIdentity(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).cognitoconn + + input := &cognitoidentity.ListIdentityPoolsInput{ + MaxResults: aws.Int64(int64(1)), + } + + _, err := conn.ListIdentityPools(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccAWSCognitoIdentityPoolConfig_basic(name string) string { return fmt.Sprintf(` resource "aws_cognito_identity_pool" "main" { diff --git a/aws/resource_aws_cognito_identity_provider_test.go b/aws/resource_aws_cognito_identity_provider_test.go index fb2bf2e7fc2..3db35c8b7c2 100644 --- a/aws/resource_aws_cognito_identity_provider_test.go +++ b/aws/resource_aws_cognito_identity_provider_test.go @@ -15,7 +15,7 @@ func TestAccAWSCognitoIdentityProvider_basic(t *testing.T) { resourceName := "aws_cognito_identity_provider.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: 
testAccCheckAWSCognitoIdentityProviderDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_cognito_resource_server_test.go b/aws/resource_aws_cognito_resource_server_test.go index d1c05237801..dc8307f6812 100644 --- a/aws/resource_aws_cognito_resource_server_test.go +++ b/aws/resource_aws_cognito_resource_server_test.go @@ -21,7 +21,7 @@ func TestAccAWSCognitoResourceServer_basic(t *testing.T) { resourceName := "aws_cognito_resource_server.main" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoResourceServerDestroy, Steps: []resource.TestStep{ @@ -62,7 +62,7 @@ func TestAccAWSCognitoResourceServer_scope(t *testing.T) { resourceName := "aws_cognito_resource_server.main" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoResourceServerDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_cognito_user_group_test.go b/aws/resource_aws_cognito_user_group_test.go index 0717e2a2ca7..9f179cd180c 100644 --- a/aws/resource_aws_cognito_user_group_test.go +++ b/aws/resource_aws_cognito_user_group_test.go @@ -19,7 +19,7 @@ func TestAccAWSCognitoUserGroup_basic(t *testing.T) { updatedGroupName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserGroupDestroy, Steps: []resource.TestStep{ @@ -47,7 +47,7 @@ func TestAccAWSCognitoUserGroup_complex(t *testing.T) { updatedGroupName := fmt.Sprintf("tf-acc-%s", 
acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserGroupDestroy, Steps: []resource.TestStep{ @@ -80,7 +80,7 @@ func TestAccAWSCognitoUserGroup_RoleArn(t *testing.T) { resourceName := "aws_cognito_user_group.main" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserGroupDestroy, Steps: []resource.TestStep{ @@ -108,7 +108,7 @@ func TestAccAWSCognitoUserGroup_importBasic(t *testing.T) { groupName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserGroupDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_cognito_user_pool_client_test.go b/aws/resource_aws_cognito_user_pool_client_test.go index e00c883550a..13e04d4bc58 100644 --- a/aws/resource_aws_cognito_user_pool_client_test.go +++ b/aws/resource_aws_cognito_user_pool_client_test.go @@ -18,7 +18,7 @@ func TestAccAWSCognitoUserPoolClient_basic(t *testing.T) { clientName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolClientDestroy, Steps: []resource.TestStep{ @@ -71,7 +71,7 @@ func TestAccAWSCognitoUserPoolClient_importBasic(t 
*testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEcsServiceDestroy, Steps: []resource.TestStep{ @@ -92,7 +92,7 @@ func TestAccAWSCognitoUserPoolClient_RefreshTokenValidity(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolClientDestroy, Steps: []resource.TestStep{ @@ -119,7 +119,7 @@ func TestAccAWSCognitoUserPoolClient_allFields(t *testing.T) { clientName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolClientDestroy, Steps: []resource.TestStep{ @@ -165,7 +165,7 @@ func TestAccAWSCognitoUserPoolClient_allFieldsUpdatingOneField(t *testing.T) { clientName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolClientDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_cognito_user_pool_domain_test.go b/aws/resource_aws_cognito_user_pool_domain_test.go index 6a5cd62fc63..a8de6284ac3 100644 --- a/aws/resource_aws_cognito_user_pool_domain_test.go +++ b/aws/resource_aws_cognito_user_pool_domain_test.go @@ -18,7 +18,7 @@ func TestAccAWSCognitoUserPoolDomain_basic(t *testing.T) { poolName := 
fmt.Sprintf("tf-acc-test-pool-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDomainDestroy, Steps: []resource.TestStep{ @@ -70,7 +70,7 @@ func TestAccAWSCognitoUserPoolDomain_custom(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDomainDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_cognito_user_pool_test.go b/aws/resource_aws_cognito_user_pool_test.go index 91da57bc84f..aee5238545d 100644 --- a/aws/resource_aws_cognito_user_pool_test.go +++ b/aws/resource_aws_cognito_user_pool_test.go @@ -74,7 +74,7 @@ func TestAccAWSCognitoUserPool_importBasic(t *testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCloudWatchDashboardDestroy, Steps: []resource.TestStep{ @@ -94,7 +94,7 @@ func TestAccAWSCognitoUserPool_basic(t *testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -119,7 +119,7 @@ func TestAccAWSCognitoUserPool_withAdminCreateUserConfiguration(t *testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + 
PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -152,7 +152,7 @@ func TestAccAWSCognitoUserPool_withAdvancedSecurityMode(t *testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -183,7 +183,7 @@ func TestAccAWSCognitoUserPool_withDeviceConfiguration(t *testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -214,7 +214,7 @@ func TestAccAWSCognitoUserPool_withEmailVerificationMessage(t *testing.T) { upatedMessage := fmt.Sprintf("%s {####}", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -245,7 +245,7 @@ func TestAccAWSCognitoUserPool_withSmsVerificationMessage(t *testing.T) { upatedVerificationMessage := fmt.Sprintf("%s {####}", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -272,7 +272,7 @@ func TestAccAWSCognitoUserPool_withEmailConfiguration(t 
*testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -299,7 +299,7 @@ func TestAccAWSCognitoUserPool_withSmsConfiguration(t *testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -320,7 +320,7 @@ func TestAccAWSCognitoUserPool_withSmsConfigurationUpdated(t *testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -347,7 +347,7 @@ func TestAccAWSCognitoUserPool_withTags(t *testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -373,7 +373,7 @@ func TestAccAWSCognitoUserPool_withAliasAttributes(t *testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -404,7 +404,7 @@ func 
TestAccAWSCognitoUserPool_withPasswordPolicy(t *testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -439,7 +439,7 @@ func TestAccAWSCognitoUserPool_withLambdaConfig(t *testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -484,7 +484,7 @@ func TestAccAWSCognitoUserPool_withSchemaAttributes(t *testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -557,7 +557,7 @@ func TestAccAWSCognitoUserPool_withVerificationMessageTemplate(t *testing.T) { name := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -591,7 +591,7 @@ func TestAccAWSCognitoUserPool_update(t *testing.T) { updatedAuthenticationMessage := fmt.Sprintf("%s {####}", acctest.RandString(10)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, Providers: testAccProviders, CheckDestroy: 
testAccCheckAWSCognitoUserPoolDestroy, Steps: []resource.TestStep{ @@ -719,6 +719,24 @@ func testAccCheckAWSCognitoUserPoolExists(name string) resource.TestCheckFunc { } } +func testAccPreCheckAWSCognitoIdentityProvider(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).cognitoidpconn + + input := &cognitoidentityprovider.ListUserPoolsInput{ + MaxResults: aws.Int64(int64(1)), + } + + _, err := conn.ListUserPools(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccAWSCognitoUserPoolConfig_basic(name string) string { return fmt.Sprintf(` resource "aws_cognito_user_pool" "pool" { diff --git a/aws/resource_aws_dlm_lifecycle_policy_test.go b/aws/resource_aws_dlm_lifecycle_policy_test.go index d5037fbeb7d..21277f8f051 100644 --- a/aws/resource_aws_dlm_lifecycle_policy_test.go +++ b/aws/resource_aws_dlm_lifecycle_policy_test.go @@ -16,7 +16,7 @@ func TestAccAWSDlmLifecyclePolicy_Basic(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSDlm(t) }, Providers: testAccProviders, CheckDestroy: dlmLifecyclePolicyDestroy, Steps: []resource.TestStep{ @@ -50,7 +50,7 @@ func TestAccAWSDlmLifecyclePolicy_Full(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSDlm(t) }, Providers: testAccProviders, CheckDestroy: dlmLifecyclePolicyDestroy, Steps: []resource.TestStep{ @@ -147,6 +147,22 @@ func checkDlmLifecyclePolicyExists(name string) resource.TestCheckFunc { } } +func testAccPreCheckAWSDlm(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).dlmconn + + input := &dlm.GetLifecyclePoliciesInput{} + + _, err := 
conn.GetLifecyclePolicies(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func dlmLifecyclePolicyBasicConfig(rName string) string { return fmt.Sprintf(` resource "aws_iam_role" "dlm_lifecycle_role" { diff --git a/aws/resource_aws_eks_cluster_test.go b/aws/resource_aws_eks_cluster_test.go index 54528ac9280..c52c78d9be3 100644 --- a/aws/resource_aws_eks_cluster_test.go +++ b/aws/resource_aws_eks_cluster_test.go @@ -78,7 +78,7 @@ func TestAccAWSEksCluster_basic(t *testing.T) { resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSEks(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEksClusterDestroy, Steps: []resource.TestStep{ @@ -118,7 +118,7 @@ func TestAccAWSEksCluster_Version(t *testing.T) { resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSEks(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEksClusterDestroy, Steps: []resource.TestStep{ @@ -153,7 +153,7 @@ func TestAccAWSEksCluster_Logging(t *testing.T) { resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSEks(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEksClusterDestroy, Steps: []resource.TestStep{ @@ -200,7 +200,7 @@ func TestAccAWSEksCluster_VpcConfig_SecurityGroupIds(t *testing.T) { resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSEks(t) }, Providers: testAccProviders, CheckDestroy: 
testAccCheckAWSEksClusterDestroy, Steps: []resource.TestStep{ @@ -228,7 +228,7 @@ func TestAccAWSEksCluster_VpcConfig_EndpointPrivateAccess(t *testing.T) { resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSEks(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEksClusterDestroy, Steps: []resource.TestStep{ @@ -274,7 +274,7 @@ func TestAccAWSEksCluster_VpcConfig_EndpointPublicAccess(t *testing.T) { resourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSEks(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSEksClusterDestroy, Steps: []resource.TestStep{ @@ -390,6 +390,22 @@ func testAccCheckAWSEksClusterNotRecreated(i, j *eks.Cluster) resource.TestCheck } } +func testAccPreCheckAWSEks(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).eksconn + + input := &eks.ListClustersInput{} + + _, err := conn.ListClusters(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccAWSEksClusterConfig_Base(rName string) string { return fmt.Sprintf(` data "aws_availability_zones" "available" {} diff --git a/aws/resource_aws_elastic_transcoder_pipeline_test.go b/aws/resource_aws_elastic_transcoder_pipeline_test.go index e53847514ca..b00d914f31a 100644 --- a/aws/resource_aws_elastic_transcoder_pipeline_test.go +++ b/aws/resource_aws_elastic_transcoder_pipeline_test.go @@ -22,7 +22,7 @@ func TestAccAWSElasticTranscoderPipeline_basic(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); 
testAccPreCheckAWSElasticTranscoder(t) }, IDRefreshName: "aws_elastictranscoder_pipeline.bar", Providers: testAccProviders, CheckDestroy: testAccCheckElasticTranscoderPipelineDestroy, @@ -49,7 +49,7 @@ func TestAccAWSElasticTranscoderPipeline_kmsKey(t *testing.T) { keyRegex := regexp.MustCompile(`^arn:aws:([a-zA-Z0-9\-])+:([a-z]{2}-[a-z]+-\d{1})?:(\d{12})?:(.*)$`) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSElasticTranscoder(t) }, IDRefreshName: "aws_elastictranscoder_pipeline.bar", Providers: testAccProviders, CheckDestroy: testAccCheckElasticTranscoderPipelineDestroy, @@ -71,7 +71,7 @@ func TestAccAWSElasticTranscoderPipeline_notifications(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSElasticTranscoder(t) }, IDRefreshName: "aws_elastictranscoder_pipeline.bar", Providers: testAccProviders, CheckDestroy: testAccCheckElasticTranscoderPipelineDestroy, @@ -136,7 +136,7 @@ func TestAccAWSElasticTranscoderPipeline_withContentConfig(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSElasticTranscoder(t) }, IDRefreshName: "aws_elastictranscoder_pipeline.bar", Providers: testAccProviders, CheckDestroy: testAccCheckElasticTranscoderPipelineDestroy, @@ -163,7 +163,7 @@ func TestAccAWSElasticTranscoderPipeline_withPermissions(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSElasticTranscoder(t) }, IDRefreshName: "aws_elastictranscoder_pipeline.baz", Providers: testAccProviders, CheckDestroy: testAccCheckElasticTranscoderPipelineDestroy, @@ -236,6 +236,22 @@ func 
testAccCheckElasticTranscoderPipelineDestroy(s *terraform.State) error { return nil } +func testAccPreCheckAWSElasticTranscoder(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).elastictranscoderconn + + input := &elastictranscoder.ListPipelinesInput{} + + _, err := conn.ListPipelines(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func awsElasticTranscoderPipelineConfigBasic(rName string) string { return fmt.Sprintf(` resource "aws_elastictranscoder_pipeline" "bar" { diff --git a/aws/resource_aws_elastic_transcoder_preset_test.go b/aws/resource_aws_elastic_transcoder_preset_test.go index 4afbc6f64c5..e105486cc3d 100644 --- a/aws/resource_aws_elastic_transcoder_preset_test.go +++ b/aws/resource_aws_elastic_transcoder_preset_test.go @@ -15,7 +15,7 @@ func TestAccAWSElasticTranscoderPreset_import(t *testing.T) { resourceName := "aws_elastictranscoder_preset.bar" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSElasticTranscoder(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckElasticTranscoderPresetDestroy, Steps: []resource.TestStep{ @@ -75,7 +75,7 @@ func TestAccAWSElasticTranscoderPreset_basic(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSElasticTranscoder(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckElasticTranscoderPresetDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_kinesis_analytics_application_test.go b/aws/resource_aws_kinesis_analytics_application_test.go index ec6afa71ed9..95c7c545082 100644 --- a/aws/resource_aws_kinesis_analytics_application_test.go +++ b/aws/resource_aws_kinesis_analytics_application_test.go @@ -17,7 +17,7 @@ func 
TestAccAWSKinesisAnalyticsApplication_basic(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -44,7 +44,7 @@ func TestAccAWSKinesisAnalyticsApplication_update(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -78,7 +78,7 @@ func TestAccAWSKinesisAnalyticsApplication_addCloudwatchLoggingOptions(t *testin thirdStep := testAccKinesisAnalyticsApplication_prereq(rInt) + testAccKinesisAnalyticsApplication_cloudwatchLoggingOptions(rInt, "testStream") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -115,7 +115,7 @@ func TestAccAWSKinesisAnalyticsApplication_updateCloudwatchLoggingOptions(t *tes secondStep := testAccKinesisAnalyticsApplication_prereq(rInt) + testAccKinesisAnalyticsApplication_cloudwatchLoggingOptions(rInt, "testStream2") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -152,7 +152,7 @@ func TestAccAWSKinesisAnalyticsApplication_inputsKinesisFirehose(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -179,7 +179,7 @@ func TestAccAWSKinesisAnalyticsApplication_inputsKinesisStream(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -215,7 +215,7 @@ func TestAccAWSKinesisAnalyticsApplication_inputsAdd(t *testing.T) { secondStep := testAccKinesisAnalyticsApplication_prereq(rInt) + testAccKinesisAnalyticsApplication_inputsKinesisStream(rInt) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -259,7 +259,7 @@ func TestAccAWSKinesisAnalyticsApplication_inputsUpdateKinesisStream(t *testing. 
secondStep := testAccKinesisAnalyticsApplication_prereq(rInt) + testAccKinesisAnalyticsApplication_inputsUpdateKinesisStream(rInt, "testStream") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -303,7 +303,7 @@ func TestAccAWSKinesisAnalyticsApplication_outputsKinesisStream(t *testing.T) { firstStep := testAccKinesisAnalyticsApplication_prereq(rInt) + testAccKinesisAnalyticsApplication_outputsKinesisStream(rInt) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -336,7 +336,7 @@ func TestAccAWSKinesisAnalyticsApplication_outputsMultiple(t *testing.T) { step := testAccKinesisAnalyticsApplication_prereq(rInt1) + testAccKinesisAnalyticsApplication_outputsMultiple(rInt1, rInt2) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -364,7 +364,7 @@ func TestAccAWSKinesisAnalyticsApplication_outputsAdd(t *testing.T) { secondStep := testAccKinesisAnalyticsApplication_prereq(rInt) + testAccKinesisAnalyticsApplication_outputsKinesisStream(rInt) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -404,7 +404,7 @@ func 
TestAccAWSKinesisAnalyticsApplication_outputsUpdateKinesisStream(t *testing secondStep := testAccKinesisAnalyticsApplication_prereq(rInt) + testAccKinesisAnalyticsApplication_outputsUpdateKinesisStream(rInt, "testStream") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -450,7 +450,7 @@ func TestAccAWSKinesisAnalyticsApplication_Outputs_Lambda_Add(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -490,7 +490,7 @@ func TestAccAWSKinesisAnalyticsApplication_Outputs_Lambda_Create(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -521,7 +521,7 @@ func TestAccAWSKinesisAnalyticsApplication_referenceDataSource(t *testing.T) { firstStep := testAccKinesisAnalyticsApplication_prereq(rInt) + testAccKinesisAnalyticsApplication_referenceDataSource(rInt) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -554,7 +554,7 @@ func TestAccAWSKinesisAnalyticsApplication_referenceDataSourceUpdate(t *testing. 
secondStep := testAccKinesisAnalyticsApplication_prereq(rInt) + testAccKinesisAnalyticsApplication_referenceDataSourceUpdate(rInt) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -589,7 +589,7 @@ func TestAccAWSKinesisAnalyticsApplication_tags(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSKinesisAnalytics(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckKinesisAnalyticsApplicationDestroy, Steps: []resource.TestStep{ @@ -684,6 +684,22 @@ func testAccCheckKinesisAnalyticsApplicationExists(n string, application *kinesi } } +func testAccPreCheckAWSKinesisAnalytics(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).kinesisanalyticsconn + + input := &kinesisanalytics.ListApplicationsInput{} + + _, err := conn.ListApplications(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccKinesisAnalyticsApplication_basic(rInt int) string { return fmt.Sprintf(` resource "aws_kinesis_analytics_application" "test" { diff --git a/aws/resource_aws_launch_template.go b/aws/resource_aws_launch_template.go index a052486a422..be58da52a4f 100644 --- a/aws/resource_aws_launch_template.go +++ b/aws/resource_aws_launch_template.go @@ -1421,7 +1421,10 @@ func readInstanceMarketOptionsFromConfig(imo map[string]interface{}) (*ec2.Launc if v, ok := imo["spot_options"]; ok { vL := v.([]interface{}) for _, v := range vL { - so := v.(map[string]interface{}) + so, ok := v.(map[string]interface{}) + if !ok { + continue + } if v, ok := 
so["block_duration_minutes"].(int); ok && v != 0 { spotOptions.BlockDurationMinutes = aws.Int64(int64(v)) diff --git a/aws/resource_aws_lightsail_domain_test.go b/aws/resource_aws_lightsail_domain_test.go index cf9898e6b17..3927455dff9 100644 --- a/aws/resource_aws_lightsail_domain_test.go +++ b/aws/resource_aws_lightsail_domain_test.go @@ -18,7 +18,7 @@ func TestAccAWSLightsailDomain_basic(t *testing.T) { lightsailDomainName := fmt.Sprintf("tf-test-lightsail-%s.com", acctest.RandString(5)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLightsail(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLightsailDomainDestroy, Steps: []resource.TestStep{ @@ -51,7 +51,7 @@ func TestAccAWSLightsailDomain_disappears(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLightsail(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLightsailDomainDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_lightsail_instance_test.go b/aws/resource_aws_lightsail_instance_test.go index 1a04e55e1ec..938f198134d 100644 --- a/aws/resource_aws_lightsail_instance_test.go +++ b/aws/resource_aws_lightsail_instance_test.go @@ -19,7 +19,7 @@ func TestAccAWSLightsailInstance_basic(t *testing.T) { lightsailName := fmt.Sprintf("tf-test-lightsail-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLightsail(t) }, IDRefreshName: "aws_lightsail_instance.lightsail_instance_test", Providers: testAccProviders, CheckDestroy: testAccCheckAWSLightsailInstanceDestroy, @@ -43,7 +43,7 @@ func TestAccAWSLightsailInstance_euRegion(t *testing.T) { lightsailName := fmt.Sprintf("tf-test-lightsail-%d", acctest.RandInt()) resource.ParallelTest(t, 
resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLightsail(t) }, IDRefreshName: "aws_lightsail_instance.lightsail_instance_test", Providers: testAccProviders, CheckDestroy: testAccCheckAWSLightsailInstanceDestroy, @@ -84,7 +84,7 @@ func TestAccAWSLightsailInstance_disapear(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLightsail(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLightsailInstanceDestroy, Steps: []resource.TestStep{ @@ -160,6 +160,22 @@ func testAccCheckAWSLightsailInstanceDestroy(s *terraform.State) error { return nil } +func testAccPreCheckAWSLightsail(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).lightsailconn + + input := &lightsail.GetInstancesInput{} + + _, err := conn.GetInstances(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccAWSLightsailInstanceConfig_basic(lightsailName string) string { return fmt.Sprintf(` provider "aws" { diff --git a/aws/resource_aws_lightsail_key_pair_test.go b/aws/resource_aws_lightsail_key_pair_test.go index bb40d7c98d8..d68a2ba7191 100644 --- a/aws/resource_aws_lightsail_key_pair_test.go +++ b/aws/resource_aws_lightsail_key_pair_test.go @@ -18,7 +18,7 @@ func TestAccAWSLightsailKeyPair_basic(t *testing.T) { lightsailName := fmt.Sprintf("tf-test-lightsail-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLightsail(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLightsailKeyPairDestroy, Steps: []resource.TestStep{ @@ -41,7 +41,7 @@ func TestAccAWSLightsailKeyPair_imported(t *testing.T) { lightsailName := 
fmt.Sprintf("tf-test-lightsail-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLightsail(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLightsailKeyPairDestroy, Steps: []resource.TestStep{ @@ -66,7 +66,7 @@ func TestAccAWSLightsailKeyPair_encrypted(t *testing.T) { lightsailName := fmt.Sprintf("tf-test-lightsail-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLightsail(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLightsailKeyPairDestroy, Steps: []resource.TestStep{ @@ -90,7 +90,7 @@ func TestAccAWSLightsailKeyPair_nameprefix(t *testing.T) { var conf1, conf2 lightsail.KeyPair resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLightsail(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLightsailKeyPairDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_lightsail_static_ip_attachment_test.go b/aws/resource_aws_lightsail_static_ip_attachment_test.go index 66b9c4c64af..9e55c2092d5 100644 --- a/aws/resource_aws_lightsail_static_ip_attachment_test.go +++ b/aws/resource_aws_lightsail_static_ip_attachment_test.go @@ -20,7 +20,7 @@ func TestAccAWSLightsailStaticIpAttachment_basic(t *testing.T) { keypairName := fmt.Sprintf("tf-test-lightsail-%s", acctest.RandString(5)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLightsail(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLightsailStaticIpAttachmentDestroy, Steps: []resource.TestStep{ @@ -54,7 +54,7 @@ func TestAccAWSLightsailStaticIpAttachment_disappears(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - 
PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLightsail(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLightsailStaticIpAttachmentDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_lightsail_static_ip_test.go b/aws/resource_aws_lightsail_static_ip_test.go index a2fd8afead8..b6b7b674020 100644 --- a/aws/resource_aws_lightsail_static_ip_test.go +++ b/aws/resource_aws_lightsail_static_ip_test.go @@ -71,7 +71,7 @@ func TestAccAWSLightsailStaticIp_basic(t *testing.T) { staticIpName := fmt.Sprintf("tf-test-lightsail-%s", acctest.RandString(5)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLightsail(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLightsailStaticIpDestroy, Steps: []resource.TestStep{ @@ -103,7 +103,7 @@ func TestAccAWSLightsailStaticIp_disappears(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSLightsail(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSLightsailStaticIpDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_macie_member_account_association_test.go b/aws/resource_aws_macie_member_account_association_test.go index fe781e6d9d1..554f7dbd56f 100644 --- a/aws/resource_aws_macie_member_account_association_test.go +++ b/aws/resource_aws_macie_member_account_association_test.go @@ -19,7 +19,7 @@ func TestAccAWSMacieMemberAccountAssociation_basic(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMacie(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSMacieMemberAccountAssociationDestroy, Steps: []resource.TestStep{ @@ -35,7 +35,7 @@ func TestAccAWSMacieMemberAccountAssociation_basic(t 
*testing.T) { func TestAccAWSMacieMemberAccountAssociation_self(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMacie(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { diff --git a/aws/resource_aws_macie_s3_bucket_association_test.go b/aws/resource_aws_macie_s3_bucket_association_test.go index 304a780bd0d..45f4a7ab1df 100644 --- a/aws/resource_aws_macie_s3_bucket_association_test.go +++ b/aws/resource_aws_macie_s3_bucket_association_test.go @@ -15,7 +15,7 @@ func TestAccAWSMacieS3BucketAssociation_basic(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMacie(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSMacieS3BucketAssociationDestroy, Steps: []resource.TestStep{ @@ -43,7 +43,7 @@ func TestAccAWSMacieS3BucketAssociation_accountIdAndPrefix(t *testing.T) { rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMacie(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSMacieS3BucketAssociationDestroy, Steps: []resource.TestStep{ @@ -141,6 +141,22 @@ func testAccCheckAWSMacieS3BucketAssociationExists(name string) resource.TestChe } } +func testAccPreCheckAWSMacie(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).macieconn + + input := &macie.ListS3ResourcesInput{} + + _, err := conn.ListS3Resources(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccAWSMacieS3BucketAssociationConfig_basic(randInt int) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { diff --git 
a/aws/resource_aws_media_package_channel_test.go b/aws/resource_aws_media_package_channel_test.go index 32fb5dcb12c..e889758dc3e 100644 --- a/aws/resource_aws_media_package_channel_test.go +++ b/aws/resource_aws_media_package_channel_test.go @@ -16,7 +16,7 @@ func TestAccAWSMediaPackageChannel_basic(t *testing.T) { resourceName := "aws_media_package_channel.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMediaPackage(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsMediaPackageChannelDestroy, Steps: []resource.TestStep{ @@ -47,7 +47,7 @@ func TestAccAWSMediaPackageChannel_description(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMediaPackage(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsMediaPackageChannelDestroy, Steps: []resource.TestStep{ @@ -79,7 +79,7 @@ func TestAccAWSMediaPackageChannel_tags(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMediaPackage(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsMediaPackageChannelDestroy, Steps: []resource.TestStep{ @@ -161,6 +161,22 @@ func testAccCheckAwsMediaPackageChannelExists(name string) resource.TestCheckFun } } +func testAccPreCheckAWSMediaPackage(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).mediapackageconn + + input := &mediapackage.ListChannelsInput{} + + _, err := conn.ListChannels(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccMediaPackageChannelConfig(rName string) string { 
return fmt.Sprintf(` resource "aws_media_package_channel" "test" { diff --git a/aws/resource_aws_media_store_container_policy_test.go b/aws/resource_aws_media_store_container_policy_test.go index 4b275c13b16..f375c8cad14 100644 --- a/aws/resource_aws_media_store_container_policy_test.go +++ b/aws/resource_aws_media_store_container_policy_test.go @@ -15,7 +15,7 @@ func TestAccAWSMediaStoreContainerPolicy_basic(t *testing.T) { rname := acctest.RandString(5) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMediaStore(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsMediaStoreContainerPolicyDestroy, Steps: []resource.TestStep{ @@ -43,7 +43,7 @@ func TestAccAWSMediaStoreContainerPolicy_import(t *testing.T) { resourceName := "aws_media_store_container_policy.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMediaStore(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsMediaStoreContainerPolicyDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_media_store_container_test.go b/aws/resource_aws_media_store_container_test.go index 9f70694b14a..ab609c14d1c 100644 --- a/aws/resource_aws_media_store_container_test.go +++ b/aws/resource_aws_media_store_container_test.go @@ -13,7 +13,7 @@ import ( func TestAccAWSMediaStoreContainer_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMediaStore(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsMediaStoreContainerDestroy, Steps: []resource.TestStep{ @@ -31,7 +31,7 @@ func TestAccAWSMediaStoreContainer_import(t *testing.T) { resourceName := "aws_media_store_container.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + 
PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMediaStore(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsMediaStoreContainerDestroy, Steps: []resource.TestStep{ @@ -93,6 +93,22 @@ func testAccCheckAwsMediaStoreContainerExists(name string) resource.TestCheckFun } } +func testAccPreCheckAWSMediaStore(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).mediastoreconn + + input := &mediastore.ListContainersInput{} + + _, err := conn.ListContainers(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccMediaStoreContainerConfig(rName string) string { return fmt.Sprintf(` resource "aws_media_store_container" "test" { diff --git a/aws/resource_aws_mq_broker_test.go b/aws/resource_aws_mq_broker_test.go index 8164ba96764..6b9617404f1 100644 --- a/aws/resource_aws_mq_broker_test.go +++ b/aws/resource_aws_mq_broker_test.go @@ -238,7 +238,7 @@ func TestAccAWSMqBroker_basic(t *testing.T) { brokerName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(5)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMq(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsMqBrokerDestroy, Steps: []resource.TestStep{ @@ -308,7 +308,7 @@ func TestAccAWSMqBroker_allFieldsDefaultVpc(t *testing.T) { ` resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMq(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsMqBrokerDestroy, Steps: []resource.TestStep{ @@ -417,7 +417,7 @@ func TestAccAWSMqBroker_allFieldsCustomVpc(t *testing.T) { ` resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMq(t) }, Providers: testAccProviders, 
CheckDestroy: testAccCheckAwsMqBrokerDestroy, Steps: []resource.TestStep{ @@ -512,7 +512,7 @@ func TestAccAWSMqBroker_updateUsers(t *testing.T) { brokerName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(5)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMq(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsMqBrokerDestroy, Steps: []resource.TestStep{ @@ -565,7 +565,7 @@ func TestAccAWSMqBroker_updateTags(t *testing.T) { brokerName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(5)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMq(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsMqBrokerDestroy, Steps: []resource.TestStep{ @@ -637,6 +637,22 @@ func testAccCheckAwsMqBrokerExists(name string) resource.TestCheckFunc { } } +func testAccPreCheckAWSMq(t *testing.T) { + conn := testAccProvider.Meta().(*AWSClient).mqconn + + input := &mq.ListBrokersInput{} + + _, err := conn.ListBrokers(input) + + if testAccPreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func testAccMqBrokerConfig(sgName, brokerName string) string { return fmt.Sprintf(` resource "aws_security_group" "test" { diff --git a/aws/resource_aws_mq_configuration_test.go b/aws/resource_aws_mq_configuration_test.go index 5b5376b0d43..5f540f5239f 100644 --- a/aws/resource_aws_mq_configuration_test.go +++ b/aws/resource_aws_mq_configuration_test.go @@ -15,7 +15,7 @@ func TestAccAWSMqConfiguration_basic(t *testing.T) { configurationName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(5)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMq(t) }, Providers: testAccProviders, 
CheckDestroy: testAccCheckAwsMqConfigurationDestroy, Steps: []resource.TestStep{ @@ -51,7 +51,7 @@ func TestAccAWSMqConfiguration_withData(t *testing.T) { configurationName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(5)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMq(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsMqConfigurationDestroy, Steps: []resource.TestStep{ @@ -75,7 +75,7 @@ func TestAccAWSMqConfiguration_updateTags(t *testing.T) { configurationName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(5)) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMq(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAwsMqConfigurationDestroy, Steps: []resource.TestStep{ diff --git a/aws/resource_aws_msk_cluster.go b/aws/resource_aws_msk_cluster.go index a07e1dc748a..41313447f66 100644 --- a/aws/resource_aws_msk_cluster.go +++ b/aws/resource_aws_msk_cluster.go @@ -30,6 +30,10 @@ func resourceAwsMskCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "bootstrap_brokers_tls": { + Type: schema.TypeString, + Computed: true, + }, "broker_node_group_info": { Type: schema.TypeList, Required: true, @@ -70,24 +74,76 @@ func resourceAwsMskCluster() *schema.Resource { "ebs_volume_size": { Type: schema.TypeInt, Required: true, - ForceNew: true, ValidateFunc: validation.IntBetween(1, 16384), }, }, }, }, + "client_authentication": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tls": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_authority_arns": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ 
+ Type: schema.TypeString, + ValidateFunc: validateArn, + }, + }, + }, + }, + }, + }, + }, + }, "cluster_name": { Type: schema.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringLenBetween(1, 64), }, - "encryption_info": { - Type: schema.TypeList, - Optional: true, + "configuration_info": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "revision": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + }, + }, + "current_version": { + Type: schema.TypeString, Computed: true, - ForceNew: true, - MaxItems: 1, + }, + "encryption_info": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + ForceNew: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "encryption_at_rest_kms_key_arn": { @@ -97,6 +153,34 @@ func resourceAwsMskCluster() *schema.Resource { ForceNew: true, ValidateFunc: validateArn, }, + "encryption_in_transit": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_broker": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: kafka.ClientBrokerTlsPlaintext, + ValidateFunc: validation.StringInSlice([]string{ + kafka.ClientBrokerPlaintext, + kafka.ClientBrokerTlsPlaintext, + kafka.ClientBrokerTls, + }, false), + }, + "in_cluster": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + }, + }, + }, + }, }, }, }, @@ -134,37 +218,16 @@ func resourceAwsMskCluster() *schema.Resource { func resourceAwsMskClusterCreate(d *schema.ResourceData, meta interface{}) error { conn := 
meta.(*AWSClient).kafkaconn - nodeInfo := d.Get("broker_node_group_info").([]interface{})[0].(map[string]interface{}) - input := &kafka.CreateClusterInput{ - ClusterName: aws.String(d.Get("cluster_name").(string)), - EnhancedMonitoring: aws.String(d.Get("enhanced_monitoring").(string)), - NumberOfBrokerNodes: aws.Int64(int64(d.Get("number_of_broker_nodes").(int))), - BrokerNodeGroupInfo: &kafka.BrokerNodeGroupInfo{ - BrokerAZDistribution: aws.String(nodeInfo["az_distribution"].(string)), - InstanceType: aws.String(nodeInfo["instance_type"].(string)), - StorageInfo: &kafka.StorageInfo{ - EbsStorageInfo: &kafka.EBSStorageInfo{ - VolumeSize: aws.Int64(int64(nodeInfo["ebs_volume_size"].(int))), - }, - }, - ClientSubnets: expandStringList(nodeInfo["client_subnets"].([]interface{})), - SecurityGroups: expandStringList(nodeInfo["security_groups"].([]interface{})), - }, - KafkaVersion: aws.String(d.Get("kafka_version").(string)), - } - - if v, ok := d.GetOk("encryption_info"); ok { - info := v.([]interface{}) - if len(info) == 1 && info[0] != nil { - i := info[0].(map[string]interface{}) - - input.EncryptionInfo = &kafka.EncryptionInfo{ - EncryptionAtRest: &kafka.EncryptionAtRest{ - DataVolumeKMSKeyId: aws.String(i["encryption_at_rest_kms_key_arn"].(string)), - }, - } - } + BrokerNodeGroupInfo: expandMskClusterBrokerNodeGroupInfo(d.Get("broker_node_group_info").([]interface{})), + ClientAuthentication: expandMskClusterClientAuthentication(d.Get("client_authentication").([]interface{})), + ClusterName: aws.String(d.Get("cluster_name").(string)), + ConfigurationInfo: expandMskClusterConfigurationInfo(d.Get("configuration_info").([]interface{})), + EncryptionInfo: expandMskClusterEncryptionInfo(d.Get("encryption_info").([]interface{})), + EnhancedMonitoring: aws.String(d.Get("enhanced_monitoring").(string)), + KafkaVersion: aws.String(d.Get("kafka_version").(string)), + NumberOfBrokerNodes: aws.Int64(int64(d.Get("number_of_broker_nodes").(int))), + Tags: 
tagsFromMapMskCluster(d.Get("tags").(map[string]interface{})), } out, err := conn.CreateCluster(input) @@ -175,10 +238,6 @@ func resourceAwsMskClusterCreate(d *schema.ResourceData, meta interface{}) error d.SetId(aws.StringValue(out.ClusterArn)) - if err := setTagsMskCluster(conn, d, aws.StringValue(out.ClusterArn)); err != nil { - return err - } - log.Printf("[DEBUG] Waiting for MSK cluster %q to be created", d.Id()) err = waitForMskClusterCreation(conn, d.Id()) if err != nil { @@ -232,47 +291,209 @@ func resourceAwsMskClusterRead(d *schema.ResourceData, meta interface{}) error { cluster := out.ClusterInfo - d.SetId(aws.StringValue(cluster.ClusterArn)) d.Set("arn", aws.StringValue(cluster.ClusterArn)) d.Set("bootstrap_brokers", aws.StringValue(brokerOut.BootstrapBrokerString)) + d.Set("bootstrap_brokers_tls", aws.StringValue(brokerOut.BootstrapBrokerStringTls)) if err := d.Set("broker_node_group_info", flattenMskBrokerNodeGroupInfo(cluster.BrokerNodeGroupInfo)); err != nil { return fmt.Errorf("error setting broker_node_group_info: %s", err) } + if err := d.Set("client_authentication", flattenMskClientAuthentication(cluster.ClientAuthentication)); err != nil { + return fmt.Errorf("error setting configuration_info: %s", err) + } + d.Set("cluster_name", aws.StringValue(cluster.ClusterName)) + + if err := d.Set("configuration_info", flattenMskConfigurationInfo(cluster.CurrentBrokerSoftwareInfo)); err != nil { + return fmt.Errorf("error setting configuration_info: %s", err) + } + + d.Set("current_version", aws.StringValue(cluster.CurrentVersion)) d.Set("enhanced_monitoring", aws.StringValue(cluster.EnhancedMonitoring)) - d.Set("encryption_info", flattenMskEncryptionInfo(cluster.EncryptionInfo)) - d.Set("kafka_version", aws.StringValue(cluster.CurrentBrokerSoftwareInfo.KafkaVersion)) - d.Set("number_of_broker_nodes", aws.Int64Value(cluster.NumberOfBrokerNodes)) - d.Set("zookeeper_connect_string", aws.StringValue(cluster.ZookeeperConnectString)) - listTagsOut, err := 
conn.ListTagsForResource(&kafka.ListTagsForResourceInput{ - ResourceArn: cluster.ClusterArn, - }) - if err != nil { - return fmt.Errorf("failed listing tags for msk cluster %q: %s", d.Id(), err) + if err := d.Set("encryption_info", flattenMskEncryptionInfo(cluster.EncryptionInfo)); err != nil { + return fmt.Errorf("error setting encryption_info: %s", err) } - if err := d.Set("tags", tagsToMapMskCluster(listTagsOut.Tags)); err != nil { + d.Set("kafka_version", aws.StringValue(cluster.CurrentBrokerSoftwareInfo.KafkaVersion)) + d.Set("number_of_broker_nodes", aws.Int64Value(cluster.NumberOfBrokerNodes)) + + if err := d.Set("tags", tagsToMapMskCluster(cluster.Tags)); err != nil { return fmt.Errorf("error setting tags: %s", err) } + d.Set("zookeeper_connect_string", aws.StringValue(cluster.ZookeeperConnectString)) + return nil } func resourceAwsMskClusterUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).kafkaconn - // currently tags are the only thing that are updatable.. 
- if err := setTagsMskCluster(conn, d, d.Id()); err != nil { - return fmt.Errorf("failed updating tags for msk cluster %q: %s", d.Id(), err) + if d.HasChange("broker_node_group_info.0.ebs_volume_size") { + input := &kafka.UpdateBrokerStorageInput{ + ClusterArn: aws.String(d.Id()), + CurrentVersion: aws.String(d.Get("current_version").(string)), + TargetBrokerEBSVolumeInfo: []*kafka.BrokerEBSVolumeInfo{ + { + KafkaBrokerNodeId: aws.String("All"), + VolumeSizeGB: aws.Int64(int64(d.Get("broker_node_group_info.0.ebs_volume_size").(int))), + }, + }, + } + + output, err := conn.UpdateBrokerStorage(input) + + if err != nil { + return fmt.Errorf("error updating MSK Cluster (%s) broker storage: %s", d.Id(), err) + } + + if output == nil { + return fmt.Errorf("error updating MSK Cluster (%s) broker storage: empty response", d.Id()) + } + + clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + + if err := waitForMskClusterOperation(conn, clusterOperationARN); err != nil { + return fmt.Errorf("error waiting for MSK Cluster (%s) operation (%s): %s", d.Id(), clusterOperationARN, err) + } + } + + if d.HasChange("configuration_info") { + input := &kafka.UpdateClusterConfigurationInput{ + ClusterArn: aws.String(d.Id()), + ConfigurationInfo: expandMskClusterConfigurationInfo(d.Get("configuration_info").([]interface{})), + CurrentVersion: aws.String(d.Get("current_version").(string)), + } + + output, err := conn.UpdateClusterConfiguration(input) + + if err != nil { + return fmt.Errorf("error updating MSK Cluster (%s) configuration: %s", d.Id(), err) + } + + if output == nil { + return fmt.Errorf("error updating MSK Cluster (%s) configuration: empty response", d.Id()) + } + + clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + + if err := waitForMskClusterOperation(conn, clusterOperationARN); err != nil { + return fmt.Errorf("error waiting for MSK Cluster (%s) operation (%s): %s", d.Id(), clusterOperationARN, err) + } + } + + if d.HasChange("tags") { + 
if err := setTagsMskCluster(conn, d, d.Id()); err != nil { + return fmt.Errorf("failed updating tags for msk cluster %q: %s", d.Id(), err) + } } return resourceAwsMskClusterRead(d, meta) } +func expandMskClusterBrokerNodeGroupInfo(l []interface{}) *kafka.BrokerNodeGroupInfo { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + bngi := &kafka.BrokerNodeGroupInfo{ + BrokerAZDistribution: aws.String(m["az_distribution"].(string)), + ClientSubnets: expandStringList(m["client_subnets"].([]interface{})), + InstanceType: aws.String(m["instance_type"].(string)), + SecurityGroups: expandStringList(m["security_groups"].([]interface{})), + StorageInfo: &kafka.StorageInfo{ + EbsStorageInfo: &kafka.EBSStorageInfo{ + VolumeSize: aws.Int64(int64(m["ebs_volume_size"].(int))), + }, + }, + } + + return bngi +} + +func expandMskClusterClientAuthentication(l []interface{}) *kafka.ClientAuthentication { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + ca := &kafka.ClientAuthentication{ + Tls: expandMskClusterTls(m["tls"].([]interface{})), + } + + return ca +} + +func expandMskClusterConfigurationInfo(l []interface{}) *kafka.ConfigurationInfo { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + ci := &kafka.ConfigurationInfo{ + Arn: aws.String(m["arn"].(string)), + Revision: aws.Int64(int64(m["revision"].(int))), + } + + return ci +} + +func expandMskClusterEncryptionInfo(l []interface{}) *kafka.EncryptionInfo { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + ei := &kafka.EncryptionInfo{ + EncryptionInTransit: expandMskClusterEncryptionInTransit(m["encryption_in_transit"].([]interface{})), + } + + if v, ok := m["encryption_at_rest_kms_key_arn"]; ok && v.(string) != "" { + ei.EncryptionAtRest = &kafka.EncryptionAtRest{ + DataVolumeKMSKeyId: aws.String(v.(string)), + } + } + + return ei +} + +func 
expandMskClusterEncryptionInTransit(l []interface{}) *kafka.EncryptionInTransit { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + eit := &kafka.EncryptionInTransit{ + ClientBroker: aws.String(m["client_broker"].(string)), + InCluster: aws.Bool(m["in_cluster"].(bool)), + } + + return eit +} + +func expandMskClusterTls(l []interface{}) *kafka.Tls { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + tls := &kafka.Tls{ + CertificateAuthorityArnList: expandStringSet(m["certificate_authority_arns"].(*schema.Set)), + } + + return tls +} + func flattenMskBrokerNodeGroupInfo(b *kafka.BrokerNodeGroupInfo) []map[string]interface{} { if b == nil { @@ -293,6 +514,31 @@ func flattenMskBrokerNodeGroupInfo(b *kafka.BrokerNodeGroupInfo) []map[string]in return []map[string]interface{}{m} } +func flattenMskClientAuthentication(ca *kafka.ClientAuthentication) []map[string]interface{} { + if ca == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "tls": flattenMskTls(ca.Tls), + } + + return []map[string]interface{}{m} +} + +func flattenMskConfigurationInfo(bsi *kafka.BrokerSoftwareInfo) []map[string]interface{} { + if bsi == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "arn": aws.StringValue(bsi.ConfigurationArn), + "revision": aws.Int64Value(bsi.ConfigurationRevision), + } + + return []map[string]interface{}{m} +} + func flattenMskEncryptionInfo(e *kafka.EncryptionInfo) []map[string]interface{} { if e == nil || e.EncryptionAtRest == nil { return []map[string]interface{}{} @@ -300,6 +546,32 @@ func flattenMskEncryptionInfo(e *kafka.EncryptionInfo) []map[string]interface{} m := map[string]interface{}{ "encryption_at_rest_kms_key_arn": aws.StringValue(e.EncryptionAtRest.DataVolumeKMSKeyId), + "encryption_in_transit": flattenMskEncryptionInTransit(e.EncryptionInTransit), + } + + return []map[string]interface{}{m} +} + +func 
flattenMskEncryptionInTransit(eit *kafka.EncryptionInTransit) []map[string]interface{} { + if eit == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "client_broker": aws.StringValue(eit.ClientBroker), + "in_cluster": aws.BoolValue(eit.InCluster), + } + + return []map[string]interface{}{m} +} + +func flattenMskTls(tls *kafka.Tls) []map[string]interface{} { + if tls == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "certificate_authority_arns": aws.StringValueSlice(tls.CertificateAuthorityArnList), } return []map[string]interface{}{m} @@ -340,3 +612,45 @@ func resourceAwsMskClusterDeleteWaiter(conn *kafka.Kafka, arn string) error { return resource.RetryableError(fmt.Errorf("timeout while waiting for the cluster %q to be deleted", arn)) }) } + +func mskClusterOperationRefreshFunc(conn *kafka.Kafka, clusterOperationARN string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + input := &kafka.DescribeClusterOperationInput{ + ClusterOperationArn: aws.String(clusterOperationARN), + } + + output, err := conn.DescribeClusterOperation(input) + + if err != nil { + return nil, "UPDATE_FAILED", fmt.Errorf("error describing MSK Cluster Operation (%s): %s", clusterOperationARN, err) + } + + if output == nil || output.ClusterOperationInfo == nil { + return nil, "UPDATE_FAILED", fmt.Errorf("error describing MSK Cluster Operation (%s): empty response", clusterOperationARN) + } + + state := aws.StringValue(output.ClusterOperationInfo.OperationState) + + if state == "UPDATE_FAILED" && output.ClusterOperationInfo.ErrorInfo != nil { + errorInfo := output.ClusterOperationInfo.ErrorInfo + err := fmt.Errorf("error code: %s, error string: %s", aws.StringValue(errorInfo.ErrorCode), aws.StringValue(errorInfo.ErrorString)) + return output.ClusterOperationInfo, state, err + } + + return output.ClusterOperationInfo, state, nil + } +} + +func waitForMskClusterOperation(conn *kafka.Kafka, 
clusterOperationARN string) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING", "UPDATE_IN_PROGRESS"}, + Target: []string{"UPDATE_COMPLETE"}, + Refresh: mskClusterOperationRefreshFunc(conn, clusterOperationARN), + Timeout: 60 * time.Minute, + } + + log.Printf("[DEBUG] Waiting for MSK Cluster Operation (%s) completion", clusterOperationARN) + _, err := stateConf.WaitForState() + + return err +} diff --git a/aws/resource_aws_msk_cluster_test.go b/aws/resource_aws_msk_cluster_test.go index b0f285326d2..608043f2073 100644 --- a/aws/resource_aws_msk_cluster_test.go +++ b/aws/resource_aws_msk_cluster_test.go @@ -57,9 +57,8 @@ func testSweepMskClusters(region string) error { func TestAccAWSMskCluster_basic(t *testing.T) { var cluster kafka.ClusterInfo - var td kafka.ListTagsForResourceOutput - ri := acctest.RandInt() - resourceName := "aws_msk_cluster.example" + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -67,28 +66,145 @@ func TestAccAWSMskCluster_basic(t *testing.T) { CheckDestroy: testAccCheckMskClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccMskClusterConfig_basic(ri), + Config: testAccMskClusterConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckMskClusterExists(resourceName, &cluster), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "kafka", regexp.MustCompile(`cluster/.+`)), - testAccMatchResourceAttrRegionalARN(resourceName, "encryption_info.0.encryption_at_rest_kms_key_arn", "kms", regexp.MustCompile(`key/.+`)), - resource.TestCheckResourceAttr(resourceName, "cluster_name", fmt.Sprintf("tf-test-%d", ri)), + resource.TestMatchResourceAttr(resourceName, "bootstrap_brokers", regexp.MustCompile(`^(([-\w]+\.){1,}[\w]+:\d+,){2,}([-\w]+\.){1,}[\w]+:\d+$`)), + resource.TestMatchResourceAttr(resourceName, "bootstrap_brokers_tls", 
regexp.MustCompile(`^(([-\w]+\.){1,}[\w]+:\d+,){2,}([-\w]+\.){1,}[\w]+:\d+$`)), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.#", "1"), resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.az_distribution", kafka.BrokerAZDistributionDefault), - resource.TestCheckResourceAttr(resourceName, "kafka_version", "1.1.1"), - resource.TestCheckResourceAttr(resourceName, "number_of_broker_nodes", "3"), - resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.instance_type", "kafka.m5.large"), resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.ebs_volume_size", "10"), - resource.TestMatchResourceAttr(resourceName, "zookeeper_connect_string", regexp.MustCompile(`^\d+\.\d+\.\d+\.\d+:\d+,\d+\.\d+\.\d+\.\d+:\d+,\d+\.\d+\.\d+\.\d+:\d+$`)), - resource.TestMatchResourceAttr(resourceName, "bootstrap_brokers", regexp.MustCompile(`^(([-\w]+\.){1,}[\w]+:\d+,){2,}([-\w]+\.){1,}[\w]+:\d+$`)), - resource.TestCheckResourceAttr(resourceName, "enhanced_monitoring", kafka.EnhancedMonitoringDefault), resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.client_subnets.#", "3"), resource.TestCheckResourceAttrPair(resourceName, "broker_node_group_info.0.client_subnets.0", "aws_subnet.example_subnet_az1", "id"), resource.TestCheckResourceAttrPair(resourceName, "broker_node_group_info.0.client_subnets.1", "aws_subnet.example_subnet_az2", "id"), resource.TestCheckResourceAttrPair(resourceName, "broker_node_group_info.0.client_subnets.2", "aws_subnet.example_subnet_az3", "id"), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.instance_type", "kafka.m5.large"), resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.security_groups.#", "1"), resource.TestCheckResourceAttrPair(resourceName, "broker_node_group_info.0.security_groups.0", "aws_security_group.example_sg", "id"), - testAccLoadMskTags(&cluster, &td), - testAccCheckMskClusterTags(&td, "foo", "bar"), + 
resource.TestCheckResourceAttr(resourceName, "client_authentication.#", "0"), + resource.TestCheckResourceAttr(resourceName, "cluster_name", rName), + resource.TestCheckResourceAttr(resourceName, "configuration_info.#", "1"), + resource.TestCheckResourceAttr(resourceName, "encryption_info.#", "1"), + testAccMatchResourceAttrRegionalARN(resourceName, "encryption_info.0.encryption_at_rest_kms_key_arn", "kms", regexp.MustCompile(`key/.+`)), + resource.TestCheckResourceAttr(resourceName, "encryption_info.0.encryption_in_transit.#", "1"), + resource.TestCheckResourceAttr(resourceName, "encryption_info.0.encryption_in_transit.0.client_broker", "TLS_PLAINTEXT"), + resource.TestCheckResourceAttr(resourceName, "encryption_info.0.encryption_in_transit.0.in_cluster", "true"), + resource.TestCheckResourceAttr(resourceName, "enhanced_monitoring", kafka.EnhancedMonitoringDefault), + resource.TestCheckResourceAttr(resourceName, "kafka_version", "2.1.0"), + resource.TestCheckResourceAttr(resourceName, "number_of_broker_nodes", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestMatchResourceAttr(resourceName, "zookeeper_connect_string", regexp.MustCompile(`^\d+\.\d+\.\d+\.\d+:\d+,\d+\.\d+\.\d+\.\d+:\d+,\d+\.\d+\.\d+\.\d+:\d+$`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return + }, + }, + }, + }) +} + +func TestAccAWSMskCluster_BrokerNodeGroupInfo_EbsVolumeSize(t *testing.T) { + var cluster1, cluster2 kafka.ClusterInfo + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskClusterDestroy, + Steps: 
[]resource.TestStep{ + { + Config: testAccMskClusterConfigBrokerNodeGroupInfoEbsVolumeSize(rName, 11), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskClusterExists(resourceName, &cluster1), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.#", "1"), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.ebs_volume_size", "11"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return + }, + }, + { + // BadRequestException: The minimum increase in storage size of the cluster should be atleast 100GB + Config: testAccMskClusterConfigBrokerNodeGroupInfoEbsVolumeSize(rName, 112), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskClusterExists(resourceName, &cluster2), + testAccCheckMskClusterNotRecreated(&cluster1, &cluster2), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.#", "1"), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.ebs_volume_size", "112"), + ), + }, + }, + }) +} + +func TestAccAWSMskCluster_ClientAuthentication_Tls_CertificateAuthorityArns(t *testing.T) { + t.Skip("Requires the aws_acmpca_certificate_authority resource to support importing the root CA certificate") + + var cluster1 kafka.ClusterInfo + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMskClusterConfigClientAuthenticationTlsCertificateAuthorityArns(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskClusterExists(resourceName, &cluster1), + 
resource.TestCheckResourceAttr(resourceName, "client_authentication.#", "1"), + resource.TestCheckResourceAttr(resourceName, "client_authentication.0.tls.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration_info.0.tls.0.certificate_authority_arns.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return + }, + }, + }, + }) +} + +func TestAccAWSMskCluster_ConfigurationInfo_Revision(t *testing.T) { + t.Skip("aws_msk_cluster is correctly calling UpdateClusterConfiguration however API is always returning 429 and 500 errors") + + var cluster1, cluster2 kafka.ClusterInfo + rName := acctest.RandomWithPrefix("tf-acc-test") + configurationResourceName := "aws_msk_configuration.test" + resourceName := "aws_msk_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMskClusterConfigConfigurationInfoRevision1(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskClusterExists(resourceName, &cluster1), + resource.TestCheckResourceAttr(resourceName, "configuration_info.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "configuration_info.0.arn", configurationResourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "configuration_info.0.revision", configurationResourceName, "latest_revision"), ), }, { @@ -96,16 +212,28 @@ func TestAccAWSMskCluster_basic(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ - "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers", // API may mutate ordering and selection of 
brokers to return + "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return }, }, + { + Config: testAccMskClusterConfigConfigurationInfoRevision2(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskClusterExists(resourceName, &cluster2), + testAccCheckMskClusterNotRecreated(&cluster1, &cluster2), + resource.TestCheckResourceAttr(resourceName, "configuration_info.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "configuration_info.0.arn", configurationResourceName, "arn"), + resource.TestCheckResourceAttrPair(resourceName, "configuration_info.0.revision", configurationResourceName, "latest_revision"), + ), + }, }, }) } -func TestAccAWSMskCluster_kms(t *testing.T) { + +func TestAccAWSMskCluster_EncryptionInfo_EncryptionAtRestKmsKeyArn(t *testing.T) { var cluster kafka.ClusterInfo - ri := acctest.RandInt() - resourceName := "aws_msk_cluster.example" + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -113,7 +241,7 @@ func TestAccAWSMskCluster_kms(t *testing.T) { CheckDestroy: testAccCheckMskClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccMskClusterConfig_kms(ri), + Config: testAccMskClusterConfigEncryptionInfoEncryptionAtRestKmsKeyArn(rName), Check: resource.ComposeTestCheckFunc( testAccCheckMskClusterExists(resourceName, &cluster), resource.TestCheckResourceAttrPair(resourceName, "encryption_info.0.encryption_at_rest_kms_key_arn", "aws_kms_key.example_key", "arn"), @@ -124,17 +252,18 @@ func TestAccAWSMskCluster_kms(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ - "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return }, }, }, 
}) } -func TestAccAWSMskCluster_enhancedMonitoring(t *testing.T) { - var cluster kafka.ClusterInfo - ri := acctest.RandInt() - resourceName := "aws_msk_cluster.example" +func TestAccAWSMskCluster_EncryptionInfo_EncryptionInTransit_ClientBroker(t *testing.T) { + var cluster1 kafka.ClusterInfo + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -142,10 +271,12 @@ func TestAccAWSMskCluster_enhancedMonitoring(t *testing.T) { CheckDestroy: testAccCheckMskClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccMskClusterConfig_enhancedMonitoring(ri), + Config: testAccMskClusterConfigEncryptionInfoEncryptionInTransitClientBroker(rName, "PLAINTEXT"), Check: resource.ComposeTestCheckFunc( - testAccCheckMskClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "enhanced_monitoring", kafka.EnhancedMonitoringPerBroker), + testAccCheckMskClusterExists(resourceName, &cluster1), + resource.TestCheckResourceAttr(resourceName, "encryption_info.#", "1"), + resource.TestCheckResourceAttr(resourceName, "encryption_info.0.encryption_in_transit.#", "1"), + resource.TestCheckResourceAttr(resourceName, "encryption_info.0.encryption_in_transit.0.client_broker", "PLAINTEXT"), ), }, { @@ -153,17 +284,18 @@ func TestAccAWSMskCluster_enhancedMonitoring(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ - "bootstrap_brokers", + "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return }, }, }, }) } -func TestAccAWSMskCluster_tagsUpdate(t *testing.T) { - var cluster kafka.ClusterInfo - var td kafka.ListTagsForResourceOutput - ri := acctest.RandInt() - resourceName := "aws_msk_cluster.example" + +func TestAccAWSMskCluster_EncryptionInfo_EncryptionInTransit_InCluster(t 
*testing.T) { + var cluster1 kafka.ClusterInfo + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -171,30 +303,61 @@ func TestAccAWSMskCluster_tagsUpdate(t *testing.T) { CheckDestroy: testAccCheckMskClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccMskClusterConfig_basic(ri), + Config: testAccMskClusterConfigEncryptionInfoEncryptionInTransitInCluster(rName, false), Check: resource.ComposeTestCheckFunc( - testAccCheckMskClusterExists(resourceName, &cluster), - testAccLoadMskTags(&cluster, &td), - testAccCheckMskClusterTags(&td, "foo", "bar"), + testAccCheckMskClusterExists(resourceName, &cluster1), + resource.TestCheckResourceAttr(resourceName, "encryption_info.#", "1"), + resource.TestCheckResourceAttr(resourceName, "encryption_info.0.encryption_in_transit.#", "1"), + resource.TestCheckResourceAttr(resourceName, "encryption_info.0.encryption_in_transit.0.in_cluster", "false"), ), }, { - Config: testAccMskClusterConfig_tagsUpdate(ri), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return + }, + }, + }, + }) +} + +func TestAccAWSMskCluster_EnhancedMonitoring(t *testing.T) { + var cluster kafka.ClusterInfo + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMskClusterConfigEnhancedMonitoring(rName, "PER_BROKER"), Check: resource.ComposeTestCheckFunc( testAccCheckMskClusterExists(resourceName, &cluster), - testAccLoadMskTags(&cluster, &td), - 
testAccCheckMskClusterTags(&td, "foo", "baz"), - testAccCheckMskClusterTags(&td, "new", "type"), + resource.TestCheckResourceAttr(resourceName, "enhanced_monitoring", kafka.EnhancedMonitoringPerBroker), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return + }, + }, }, }) } -func TestAccAWSMskCluster_brokerNodes(t *testing.T) { +func TestAccAWSMskCluster_NumberOfBrokerNodes(t *testing.T) { var cluster kafka.ClusterInfo - ri := acctest.RandInt() - resourceName := "aws_msk_cluster.example" + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -202,19 +365,58 @@ func TestAccAWSMskCluster_brokerNodes(t *testing.T) { CheckDestroy: testAccCheckMskClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccMskClusterConfig_brokerNodes(ri), + Config: testAccMskClusterConfigNumberOfBrokerNodes(rName), Check: resource.ComposeTestCheckFunc( testAccCheckMskClusterExists(resourceName, &cluster), - resource.TestCheckResourceAttr(resourceName, "number_of_broker_nodes", "6"), - resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.instance_type", "kafka.m5.large"), - resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.ebs_volume_size", "1"), + resource.TestMatchResourceAttr(resourceName, "bootstrap_brokers", regexp.MustCompile(`^(([-\w]+\.){1,}[\w]+:\d+,){2,}([-\w]+\.){1,}[\w]+:\d+$`)), + resource.TestMatchResourceAttr(resourceName, "bootstrap_brokers_tls", regexp.MustCompile(`^(([-\w]+\.){1,}[\w]+:\d+,){2,}([-\w]+\.){1,}[\w]+:\d+$`)), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.#", "1"), resource.TestCheckResourceAttr(resourceName, 
"broker_node_group_info.0.client_subnets.#", "3"), resource.TestCheckResourceAttrPair(resourceName, "broker_node_group_info.0.client_subnets.0", "aws_subnet.example_subnet_az1", "id"), resource.TestCheckResourceAttrPair(resourceName, "broker_node_group_info.0.client_subnets.1", "aws_subnet.example_subnet_az2", "id"), resource.TestCheckResourceAttrPair(resourceName, "broker_node_group_info.0.client_subnets.2", "aws_subnet.example_subnet_az3", "id"), - resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.security_groups.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "broker_node_group_info.0.security_groups.0", "aws_security_group.example_sg", "id"), - resource.TestCheckResourceAttr(resourceName, "kafka_version", "2.1.0"), + resource.TestCheckResourceAttr(resourceName, "number_of_broker_nodes", "6"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return + }, + }, + }, + }) +} + +func TestAccAWSMskCluster_Tags(t *testing.T) { + var cluster kafka.ClusterInfo + var td kafka.ListTagsForResourceOutput + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMskClusterConfigTags1(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskClusterExists(resourceName, &cluster), + testAccLoadMskTags(&cluster, &td), + testAccCheckMskClusterTags(&td, "foo", "bar"), + ), + }, + { + Config: testAccMskClusterConfigTags2(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskClusterExists(resourceName, &cluster), + 
testAccLoadMskTags(&cluster, &td), + testAccCheckMskClusterTags(&td, "foo", "baz"), + testAccCheckMskClusterTags(&td, "new", "type"), ), }, { @@ -222,7 +424,8 @@ func TestAccAWSMskCluster_brokerNodes(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ - "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return }, }, }, @@ -275,6 +478,16 @@ func testAccCheckMskClusterExists(n string, cluster *kafka.ClusterInfo) resource } } +func testAccCheckMskClusterNotRecreated(i, j *kafka.ClusterInfo) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.ClusterArn) != aws.StringValue(j.ClusterArn) { + return fmt.Errorf("MSK Cluster (%s) recreated", aws.StringValue(i.ClusterArn)) + } + + return nil + } +} + func testAccLoadMskTags(cluster *kafka.ClusterInfo, td *kafka.ListTagsForResourceOutput) resource.TestCheckFunc { return func(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).kafkaconn @@ -362,109 +575,295 @@ resource "aws_security_group" "example_sg" { `) } -func testAccMskClusterConfig_basic(randInt int) string { +func testAccMskClusterConfig_basic(rName string) string { return testAccMskClusterBaseConfig() + fmt.Sprintf(` +resource "aws_msk_cluster" "test" { + cluster_name = %[1]q + kafka_version = "2.1.0" + number_of_broker_nodes = 3 + + broker_node_group_info { + client_subnets = ["${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", "${aws_subnet.example_subnet_az3.id}"] + ebs_volume_size = 10 + instance_type = "kafka.m5.large" + security_groups = ["${aws_security_group.example_sg.id}"] + } +} +`, rName) +} -resource "aws_msk_cluster" "example" { - cluster_name = "tf-test-%d" - kafka_version = "1.1.1" - number_of_broker_nodes = 3 - 
broker_node_group_info { - instance_type = "kafka.m5.large" - ebs_volume_size = 10 - client_subnets = [ "${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", "${aws_subnet.example_subnet_az3.id}" ] - security_groups = [ "${aws_security_group.example_sg.id}" ] - } - tags = { - foo = "bar" - } +func testAccMskClusterConfigBrokerNodeGroupInfoEbsVolumeSize(rName string, ebsVolumeSize int) string { + return testAccMskClusterBaseConfig() + fmt.Sprintf(` +resource "aws_msk_cluster" "test" { + cluster_name = %[1]q + kafka_version = "2.1.0" + number_of_broker_nodes = 3 + + broker_node_group_info { + client_subnets = ["${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", "${aws_subnet.example_subnet_az3.id}"] + ebs_volume_size = %[2]d + instance_type = "kafka.m5.large" + security_groups = ["${aws_security_group.example_sg.id}"] + } +} +`, rName, ebsVolumeSize) +} + +func testAccMskClusterConfigClientAuthenticationTlsCertificateAuthorityArns(rName string) string { + return testAccMskClusterBaseConfig() + fmt.Sprintf(` +resource "aws_acmpca_certificate_authority" "test" { + certificate_authority_configuration { + key_algorithm = "RSA_4096" + signing_algorithm = "SHA512WITHRSA" + + subject { + common_name = "terraformtesting.com" + } + } +} + +resource "aws_msk_cluster" "test" { + cluster_name = %[1]q + kafka_version = "2.1.0" + number_of_broker_nodes = 3 + + broker_node_group_info { + client_subnets = ["${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", "${aws_subnet.example_subnet_az3.id}"] + ebs_volume_size = 10 + instance_type = "kafka.m5.large" + security_groups = ["${aws_security_group.example_sg.id}"] + } + + client_authentication { + tls { + certificate_authority_arns = ["${aws_acmpca_certificate_authority.test.arn}"] + } + } + + encryption_info { + encryption_in_transit { + client_broker = "TLS" + } + } +} +`, rName) +} + +func testAccMskClusterConfigConfigurationInfoRevision1(rName string) 
string { + return testAccMskClusterBaseConfig() + fmt.Sprintf(` +resource "aws_msk_configuration" "test" { + kafka_versions = ["2.1.0"] + name = "%[1]s-1" + + server_properties = < 0 { @@ -314,7 +316,7 @@ loop: // this occurs when a statement has not been completed if stack.top > 1 { - return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container)) + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) } // returns a sublist which excludes the start symbol diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go index ba9c96de99e..6ca57cc1498 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go @@ -751,6 +751,18 @@ func (c *CodeCommit) CreateRepositoryRequest(input *CreateRepositoryInput) (req // * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" // The encryption key is not available. // +// * ErrCodeInvalidTagsMapException "InvalidTagsMapException" +// The map of tags is not valid. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// The maximum number of tags for an AWS CodeCommit resource has been exceeded. +// +// * ErrCodeInvalidSystemTagUsageException "InvalidSystemTagUsageException" +// The specified tag is not valid. Key names cannot be prefixed with aws:. +// +// * ErrCodeTagPolicyException "TagPolicyException" +// The tag policy is not valid. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/CreateRepository func (c *CodeCommit) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) { req, out := c.CreateRepositoryRequest(input) @@ -1518,7 +1530,7 @@ func (c *CodeCommit) GetBlobRequest(input *GetBlobInput) (req *request.Request, // * ErrCodeFileTooLargeException "FileTooLargeException" // The specified file exceeds the file size limit for AWS CodeCommit. For more // information about limits in AWS CodeCommit, see AWS CodeCommit User Guide -// (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). +// (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetBlob func (c *CodeCommit) GetBlob(input *GetBlobInput) (*GetBlobOutput, error) { @@ -2519,7 +2531,7 @@ func (c *CodeCommit) GetFileRequest(input *GetFileInput) (req *request.Request, // * ErrCodeFileTooLargeException "FileTooLargeException" // The specified file exceeds the file size limit for AWS CodeCommit. For more // information about limits in AWS CodeCommit, see AWS CodeCommit User Guide -// (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). +// (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetFile func (c *CodeCommit) GetFile(input *GetFileInput) (*GetFileOutput, error) { @@ -3597,6 +3609,106 @@ func (c *CodeCommit) ListRepositoriesPagesWithContext(ctx aws.Context, input *Li return p.Err() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/ListTagsForResource +func (c *CodeCommit) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS CodeCommit. +// +// Gets information about AWS tags for a specified Amazon Resource Name (ARN) +// in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit +// Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation ListTagsForResource for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. +// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. +// +// * ErrCodeResourceArnRequiredException "ResourceArnRequiredException" +// A valid Amazon Resource Name (ARN) for an AWS CodeCommit resource is required. +// For a list of valid resources in AWS CodeCommit, see CodeCommit Resources +// and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. +// +// * ErrCodeInvalidResourceArnException "InvalidResourceArnException" +// The value for the resource ARN is not valid. For more information about resources +// in AWS CodeCommit, see CodeCommit Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/ListTagsForResource +func (c *CodeCommit) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opMergePullRequestByFastForward = "MergePullRequestByFastForward" // MergePullRequestByFastForwardRequest generates a "aws/request.Request" representing the @@ -4533,6 +4645,121 @@ func (c *CodeCommit) PutRepositoryTriggersWithContext(ctx aws.Context, input *Pu return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/TagResource +func (c *CodeCommit) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS CodeCommit. +// +// Adds or updates tags for a resource in AWS CodeCommit. For a list of valid +// resources in AWS CodeCommit, see CodeCommit Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. +// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. 
+// +// * ErrCodeResourceArnRequiredException "ResourceArnRequiredException" +// A valid Amazon Resource Name (ARN) for an AWS CodeCommit resource is required. +// For a list of valid resources in AWS CodeCommit, see CodeCommit Resources +// and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. +// +// * ErrCodeInvalidResourceArnException "InvalidResourceArnException" +// The value for the resource ARN is not valid. For more information about resources +// in AWS CodeCommit, see CodeCommit Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. +// +// * ErrCodeTagsMapRequiredException "TagsMapRequiredException" +// A map of tags is required. +// +// * ErrCodeInvalidTagsMapException "InvalidTagsMapException" +// The map of tags is not valid. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// The maximum number of tags for an AWS CodeCommit resource has been exceeded. +// +// * ErrCodeInvalidSystemTagUsageException "InvalidSystemTagUsageException" +// The specified tag is not valid. Key names cannot be prefixed with aws:. +// +// * ErrCodeTagPolicyException "TagPolicyException" +// The tag policy is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/TagResource +func (c *CodeCommit) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opTestRepositoryTriggers = "TestRepositoryTriggers" // TestRepositoryTriggersRequest generates a "aws/request.Request" representing the @@ -4685,6 +4912,121 @@ func (c *CodeCommit) TestRepositoryTriggersWithContext(ctx aws.Context, input *T return out, req.Send() } +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UntagResource +func (c *CodeCommit) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS CodeCommit. +// +// Removes tags for a resource in AWS CodeCommit. For a list of valid resources +// in AWS CodeCommit, see CodeCommit Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. +// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. 
+// +// * ErrCodeResourceArnRequiredException "ResourceArnRequiredException" +// A valid Amazon Resource Name (ARN) for an AWS CodeCommit resource is required. +// For a list of valid resources in AWS CodeCommit, see CodeCommit Resources +// and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. +// +// * ErrCodeInvalidResourceArnException "InvalidResourceArnException" +// The value for the resource ARN is not valid. For more information about resources +// in AWS CodeCommit, see CodeCommit Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) +// in the AWS CodeCommit User Guide. +// +// * ErrCodeTagKeysListRequiredException "TagKeysListRequiredException" +// A list of tag keys is required. The list cannot be empty or null. +// +// * ErrCodeInvalidTagKeysListException "InvalidTagKeysListException" +// The list of tags is not valid. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// The maximum number of tags for an AWS CodeCommit resource has been exceeded. +// +// * ErrCodeInvalidSystemTagUsageException "InvalidSystemTagUsageException" +// The specified tag is not valid. Key names cannot be prefixed with aws:. +// +// * ErrCodeTagPolicyException "TagPolicyException" +// The tag policy is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UntagResource +func (c *CodeCommit) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateComment = "UpdateComment" // UpdateCommentRequest generates a "aws/request.Request" representing the @@ -5375,7 +5717,7 @@ func (c *CodeCommit) UpdateRepositoryNameRequest(input *UpdateRepositoryNameInpu // AWS account. In addition, repository names are limited to 100 alphanumeric, // dash, and underscore characters, and cannot include certain characters. The // suffix ".git" is prohibited. For a full description of the limits on repository -// names, see Limits (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) +// names, see Limits (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) // in the AWS CodeCommit User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6380,11 +6722,14 @@ type CreateRepositoryInput struct { // The repository name must be unique across the calling AWS account. In addition, // repository names are limited to 100 alphanumeric, dash, and underscore characters, // and cannot include certain characters. For a full description of the limits - // on repository names, see Limits (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) + // on repository names, see Limits (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) // in the AWS CodeCommit User Guide. The suffix ".git" is prohibited. 
// // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` + + // One or more tag key-value pairs to use when tagging this repository. + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -6425,6 +6770,12 @@ func (s *CreateRepositoryInput) SetRepositoryName(v string) *CreateRepositoryInp return s } +// SetTags sets the Tags field's value. +func (s *CreateRepositoryInput) SetTags(v map[string]*string) *CreateRepositoryInput { + s.Tags = v + return s +} + // Represents the output of a create repository operation. type CreateRepositoryOutput struct { _ struct{} `type:"structure"` @@ -8818,6 +9169,88 @@ func (s *ListRepositoriesOutput) SetRepositories(v []*RepositoryNameIdPair) *Lis return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // An enumeration token that when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The Amazon Resource Name (ARN) of the resource for which you want to get + // information about tags, if any. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { + s.NextToken = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // An enumeration token that allows the operation to batch the next results + // of the operation. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of tag key and value pairs associated with the specified resource. + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + // Returns information about the location of a change or comment in the comparison // between two commits or a pull request. 
type Location struct { @@ -10745,6 +11178,73 @@ func (s *SymbolicLink) SetRelativePath(v string) *SymbolicLink { return s } +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource to which you want to add or + // update tags. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + + // The key-value pair to use when tagging this repository. + // + // Tags is a required field + Tags map[string]*string `locationName:"tags" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + // Returns information about a target for a pull request. 
type Target struct { _ struct{} `type:"structure"` @@ -10913,6 +11413,73 @@ func (s *TestRepositoryTriggersOutput) SetSuccessfulExecutions(v []*string) *Tes return s } +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource to which you want to remove + // tags. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + + // The tag key for each tag that you want to remove from the resource. + // + // TagKeys is a required field + TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. 
+func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateCommentInput struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go index 71abd772b8b..70a79d79afd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go @@ -118,6 +118,16 @@ // * UpdateComment, which updates the content of a comment on a commit in // a repository. // +// Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the +// following: +// +// * ListTagsForResource, which gets information about AWS tags for a specified +// Amazon Resource Name (ARN) in AWS CodeCommit. +// +// * TagResource, which adds or updates tags for a resource in AWS CodeCommit. +// +// * UntagResource, which removes tags for a resource in AWS CodeCommit. +// // Triggers, by calling the following: // // * GetRepositoryTriggers, which returns information about triggers configured @@ -130,7 +140,7 @@ // trigger by sending data to the trigger target. // // For information about how to use AWS CodeCommit, see the AWS CodeCommit User -// Guide (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html). +// Guide (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html). // // See https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13 for more information on this service. 
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go index e97adeb7d6c..67fd3609456 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go @@ -255,7 +255,7 @@ const ( // // The specified file exceeds the file size limit for AWS CodeCommit. For more // information about limits in AWS CodeCommit, see AWS CodeCommit User Guide - // (http://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). + // (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). ErrCodeFileTooLargeException = "FileTooLargeException" // ErrCodeFolderContentSizeLimitExceededException for service response error code @@ -520,6 +520,14 @@ const ( // Triggers must be created in the same region as the target for the trigger. ErrCodeInvalidRepositoryTriggerRegionException = "InvalidRepositoryTriggerRegionException" + // ErrCodeInvalidResourceArnException for service response error code + // "InvalidResourceArnException". + // + // The value for the resource ARN is not valid. For more information about resources + // in AWS CodeCommit, see CodeCommit Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) + // in the AWS CodeCommit User Guide. + ErrCodeInvalidResourceArnException = "InvalidResourceArnException" + // ErrCodeInvalidSortByException for service response error code // "InvalidSortByException". // @@ -533,6 +541,24 @@ const ( // name, tag, or full commit ID. ErrCodeInvalidSourceCommitSpecifierException = "InvalidSourceCommitSpecifierException" + // ErrCodeInvalidSystemTagUsageException for service response error code + // "InvalidSystemTagUsageException". + // + // The specified tag is not valid. Key names cannot be prefixed with aws:. 
+ ErrCodeInvalidSystemTagUsageException = "InvalidSystemTagUsageException" + + // ErrCodeInvalidTagKeysListException for service response error code + // "InvalidTagKeysListException". + // + // The list of tags is not valid. + ErrCodeInvalidTagKeysListException = "InvalidTagKeysListException" + + // ErrCodeInvalidTagsMapException for service response error code + // "InvalidTagsMapException". + // + // The map of tags is not valid. + ErrCodeInvalidTagsMapException = "InvalidTagsMapException" + // ErrCodeInvalidTargetException for service response error code // "InvalidTargetException". // @@ -783,6 +809,15 @@ const ( // The list of triggers for the repository is required but was not specified. ErrCodeRepositoryTriggersListRequiredException = "RepositoryTriggersListRequiredException" + // ErrCodeResourceArnRequiredException for service response error code + // "ResourceArnRequiredException". + // + // A valid Amazon Resource Name (ARN) for an AWS CodeCommit resource is required. + // For a list of valid resources in AWS CodeCommit, see CodeCommit Resources + // and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) + // in the AWS CodeCommit User Guide. + ErrCodeResourceArnRequiredException = "ResourceArnRequiredException" + // ErrCodeRestrictedSourceFileException for service response error code // "RestrictedSourceFileException". // @@ -821,6 +856,24 @@ const ( // been specified for the commit. ErrCodeSourceFileOrContentRequiredException = "SourceFileOrContentRequiredException" + // ErrCodeTagKeysListRequiredException for service response error code + // "TagKeysListRequiredException". + // + // A list of tag keys is required. The list cannot be empty or null. + ErrCodeTagKeysListRequiredException = "TagKeysListRequiredException" + + // ErrCodeTagPolicyException for service response error code + // "TagPolicyException". + // + // The tag policy is not valid. 
+ ErrCodeTagPolicyException = "TagPolicyException" + + // ErrCodeTagsMapRequiredException for service response error code + // "TagsMapRequiredException". + // + // A map of tags is required. + ErrCodeTagsMapRequiredException = "TagsMapRequiredException" + // ErrCodeTargetRequiredException for service response error code // "TargetRequiredException". // @@ -856,4 +909,10 @@ const ( // // A pull request title is required. It cannot be empty or null. ErrCodeTitleRequiredException = "TitleRequiredException" + + // ErrCodeTooManyTagsException for service response error code + // "TooManyTagsException". + // + // The maximum number of tags for an AWS CodeCommit resource has been exceeded. + ErrCodeTooManyTagsException = "TooManyTagsException" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dlm/api.go b/vendor/github.com/aws/aws-sdk-go/service/dlm/api.go index 2c83090c38c..6f6e1bb03ff 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dlm/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dlm/api.go @@ -566,7 +566,8 @@ func (s *CreateLifecyclePolicyOutput) SetPolicyId(v string) *CreateLifecyclePoli type CreateRule struct { _ struct{} `type:"structure"` - // The interval. The supported values are 12 and 24. + // The interval between snapshots. The supported values are 2, 3, 4, 6, 8, 12, + // and 24. // // Interval is a required field Interval *int64 `min:"1" type:"integer" required:"true"` @@ -576,7 +577,7 @@ type CreateRule struct { // IntervalUnit is a required field IntervalUnit *string `type:"string" required:"true" enum:"IntervalUnitValues"` - // The time, in UTC, to start the operation. + // The time, in UTC, to start the operation. The supported format is hh:mm. // // The operation occurs within a one-hour window following the specified time. 
Times []*string `type:"list"` @@ -974,10 +975,44 @@ func (s *LifecyclePolicySummary) SetState(v string) *LifecyclePolicySummary { return s } +// Optional parameters that can be added to the policy. The set of valid parameters +// depends on the combination of policyType and resourceType values. +type Parameters struct { + _ struct{} `type:"structure"` + + // When executing an EBS Snapshot Management – Instance policy, execute all + // CreateSnapshots calls with the excludeBootVolume set to the supplied field. + // Defaults to false. Only valid for EBS Snapshot Management – Instance policies. + ExcludeBootVolume *bool `type:"boolean"` +} + +// String returns the string representation +func (s Parameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Parameters) GoString() string { + return s.String() +} + +// SetExcludeBootVolume sets the ExcludeBootVolume field's value. +func (s *Parameters) SetExcludeBootVolume(v bool) *Parameters { + s.ExcludeBootVolume = &v + return s +} + // Specifies the configuration of a lifecycle policy. type PolicyDetails struct { _ struct{} `type:"structure"` + // A set of optional parameters that can be provided by the policy. + Parameters *Parameters `type:"structure"` + + // This field determines the valid target resource types and actions a policy + // can manage. This field defaults to EBS_SNAPSHOT_MANAGEMENT if not present. + PolicyType *string `type:"string" enum:"PolicyTypeValues"` + // The resource type. ResourceTypes []*string `min:"1" type:"list"` @@ -1037,6 +1072,18 @@ func (s *PolicyDetails) Validate() error { return nil } +// SetParameters sets the Parameters field's value. +func (s *PolicyDetails) SetParameters(v *Parameters) *PolicyDetails { + s.Parameters = v + return s +} + +// SetPolicyType sets the PolicyType field's value. 
+func (s *PolicyDetails) SetPolicyType(v string) *PolicyDetails { + s.PolicyType = &v + return s +} + // SetResourceTypes sets the ResourceTypes field's value. func (s *PolicyDetails) SetResourceTypes(v []*string) *PolicyDetails { s.ResourceTypes = v @@ -1101,6 +1148,8 @@ func (s *RetainRule) SetCount(v int64) *RetainRule { type Schedule struct { _ struct{} `type:"structure"` + // Copy all user-defined tags on a source volume to snapshots of the volume + // created by this policy. CopyTags *bool `type:"boolean"` // The create rule. @@ -1115,6 +1164,12 @@ type Schedule struct { // The tags to apply to policy-created resources. These user-defined tags are // in addition to the AWS-added lifecycle tags. TagsToAdd []*Tag `type:"list"` + + // A collection of key/value pairs with values determined dynamically when the + // policy is executed. Keys may be any valid Amazon EC2 tag key. Values must + // be in one of the two following formats: $(instance-id) or $(timestamp). Variable + // tags are only valid for EBS Snapshot Management – Instance policies. + VariableTags []*Tag `type:"list"` } // String returns the string representation @@ -1150,6 +1205,16 @@ func (s *Schedule) Validate() error { } } } + if s.VariableTags != nil { + for i, v := range s.VariableTags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "VariableTags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1187,6 +1252,12 @@ func (s *Schedule) SetTagsToAdd(v []*Tag) *Schedule { return s } +// SetVariableTags sets the VariableTags field's value. +func (s *Schedule) SetVariableTags(v []*Tag) *Schedule { + s.VariableTags = v + return s +} + // Specifies a tag for a resource. 
type Tag struct { _ struct{} `type:"structure"` @@ -1355,9 +1426,17 @@ const ( IntervalUnitValuesHours = "HOURS" ) +const ( + // PolicyTypeValuesEbsSnapshotManagement is a PolicyTypeValues enum value + PolicyTypeValuesEbsSnapshotManagement = "EBS_SNAPSHOT_MANAGEMENT" +) + const ( // ResourceTypeValuesVolume is a ResourceTypeValues enum value ResourceTypeValuesVolume = "VOLUME" + + // ResourceTypeValuesInstance is a ResourceTypeValues enum value + ResourceTypeValuesInstance = "INSTANCE" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 592939cee05..8683f47034d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -2020,9 +2020,9 @@ func (c *EC2) AuthorizeSecurityGroupEgressRequest(input *AuthorizeSecurityGroupE // [VPC only] Adds the specified egress rules to a security group for use with // a VPC. // -// An outbound rule permits instances to send traffic to the specified destination -// IPv4 or IPv6 CIDR address ranges, or to the specified destination security -// groups for the same VPC. +// An outbound rule permits instances to send traffic to the specified IPv4 +// or IPv6 CIDR address ranges, or to the instances associated with the specified +// destination security groups. // // You specify a protocol for each rule (for example, TCP). For the TCP and // UDP protocols, you must also specify the destination port or port range. @@ -2110,9 +2110,9 @@ func (c *EC2) AuthorizeSecurityGroupIngressRequest(input *AuthorizeSecurityGroup // // Adds the specified ingress rules to a security group. // -// An inbound rule permits instances to receive traffic from the specified destination -// IPv4 or IPv6 CIDR address ranges, or from the specified destination security -// groups. 
+// An inbound rule permits instances to receive traffic from the specified IPv4 +// or IPv6 CIDR address ranges, or from the instances associated with the specified +// destination security groups. // // You specify a protocol for each rule (for example, TCP). For TCP and UDP, // you must also specify the destination port or port range. For ICMP/ICMPv6, @@ -5578,6 +5578,83 @@ func (c *EC2) CreateSnapshotWithContext(ctx aws.Context, input *CreateSnapshotIn return out, req.Send() } +const opCreateSnapshots = "CreateSnapshots" + +// CreateSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the CreateSnapshots operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSnapshots for more information on using the CreateSnapshots +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSnapshotsRequest method. +// req, resp := client.CreateSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSnapshots +func (c *EC2) CreateSnapshotsRequest(input *CreateSnapshotsInput) (req *request.Request, output *CreateSnapshotsOutput) { + op := &request.Operation{ + Name: opCreateSnapshots, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSnapshotsInput{} + } + + output = &CreateSnapshotsOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSnapshots API operation for Amazon Elastic Compute Cloud. 
+// +// Creates crash-consistent snapshots of multiple EBS volumes and stores the +// data in S3. Volumes are chosen by specifying an instance. Any attached volumes +// will produce one snapshot each that is crash-consistent across the instance. +// Boot volumes can be excluded by changing the paramaters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateSnapshots for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSnapshots +func (c *EC2) CreateSnapshots(input *CreateSnapshotsInput) (*CreateSnapshotsOutput, error) { + req, out := c.CreateSnapshotsRequest(input) + return out, req.Send() +} + +// CreateSnapshotsWithContext is the same as CreateSnapshots with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSnapshots for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateSnapshotsWithContext(ctx aws.Context, input *CreateSnapshotsInput, opts ...request.Option) (*CreateSnapshotsOutput, error) { + req, out := c.CreateSnapshotsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateSpotDatafeedSubscription = "CreateSpotDatafeedSubscription" // CreateSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the @@ -22348,7 +22425,7 @@ func (c *EC2) EnableEbsEncryptionByDefaultRequest(input *EnableEbsEncryptionByDe // snapshot is also encrypted. 
For more information, see Amazon EBS Snapshots // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html). // -// Once EBS encryption by default is enabled, you can no longer launch older-generation +// After EBS encryption by default is enabled, you can no longer launch older-generation // instance types that do not support encryption. For more information, see // Supported Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). // @@ -24696,7 +24773,7 @@ func (c *EC2) ModifyEbsDefaultKmsKeyIdRequest(input *ModifyEbsDefaultKmsKeyIdInp // ModifyEbsDefaultKmsKeyId API operation for Amazon Elastic Compute Cloud. // // Changes the default customer master key (CMK) that your account uses to encrypt -// EBS volumes if you don’t specify a CMK in the API call. +// EBS volumes if you don't specify a CMK in the API call. // // Your account has an AWS-managed default CMK that is used for encrypting an // EBS volume when no CMK is specified in the API call that creates the volume. @@ -36826,17 +36903,16 @@ type CopyImageInput struct { // the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted // flag must also be set. // - // The CMK identifier may be provided in any of the following formats: + // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, + // or alias ARN. When using an alias name, prefix it with "alias/". For example: // - // * Key ID + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // * ARN using key ID. The ID ARN contains the arn:aws:kms namespace, followed - // by the Region of the CMK, the AWS account ID of the CMK owner, the key - // namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // - // * ARN using key alias. 
The alias ARN contains the arn:aws:kms namespace, - // followed by the Region of the CMK, the AWS account ID of the CMK owner, - // the alias namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. + // * Alias name: alias/ExampleAlias + // + // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // AWS parses KmsKeyId asynchronously, meaning that the action you call may // appear to complete even though you provided an invalid identifier. This action @@ -40655,6 +40731,105 @@ func (s *CreateSnapshotInput) SetVolumeId(v string) *CreateSnapshotInput { return s } +type CreateSnapshotsInput struct { + _ struct{} `type:"structure"` + + // Copies the tags from the specified instance to all snapshots. + CopyTagsFromSource *string `type:"string" enum:"CopyTagsFromSource"` + + // A description propagated to every snapshot specified by the instance. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action without actually + // making the request. Provides an error response. If you have the required + // permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The instance to specify which volumes should be included in the snapshots. + // + // InstanceSpecification is a required field + InstanceSpecification *InstanceSpecification `type:"structure" required:"true"` + + // Tags to apply to every snapshot specified by the instance. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateSnapshotsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateSnapshotsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotsInput"} + if s.InstanceSpecification == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceSpecification")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCopyTagsFromSource sets the CopyTagsFromSource field's value. +func (s *CreateSnapshotsInput) SetCopyTagsFromSource(v string) *CreateSnapshotsInput { + s.CopyTagsFromSource = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateSnapshotsInput) SetDescription(v string) *CreateSnapshotsInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateSnapshotsInput) SetDryRun(v bool) *CreateSnapshotsInput { + s.DryRun = &v + return s +} + +// SetInstanceSpecification sets the InstanceSpecification field's value. +func (s *CreateSnapshotsInput) SetInstanceSpecification(v *InstanceSpecification) *CreateSnapshotsInput { + s.InstanceSpecification = v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateSnapshotsInput) SetTagSpecifications(v []*TagSpecification) *CreateSnapshotsInput { + s.TagSpecifications = v + return s +} + +type CreateSnapshotsOutput struct { + _ struct{} `type:"structure"` + + // List of snapshots. + Snapshots []*SnapshotInfo `locationName:"snapshotSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateSnapshotsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSnapshotsOutput) GoString() string { + return s.String() +} + +// SetSnapshots sets the Snapshots field's value. +func (s *CreateSnapshotsOutput) SetSnapshots(v []*SnapshotInfo) *CreateSnapshotsOutput { + s.Snapshots = v + return s +} + // Contains the parameters for CreateSpotDatafeedSubscription. 
type CreateSpotDatafeedSubscriptionInput struct { _ struct{} `type:"structure"` @@ -41375,13 +41550,13 @@ type CreateVolumeInput struct { DryRun *bool `locationName:"dryRun" type:"boolean"` // Specifies the encryption state of the volume. The default effect of setting - // the Encrypted parameter to true through the console, API, or CLI depends - // on the volume's origin (new or from a snapshot), starting encryption state, - // ownership, and whether account-level encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/account-level-encryption.html) + // the Encrypted parameter to true depends on the volume origin (new or from + // a snapshot), starting encryption state, ownership, and whether account-level + // encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/account-level-encryption.html) // is enabled. Each default case can be overridden by specifying a customer - // master key (CMK) with the KmsKeyId parameter in addition to setting Encrypted + // master key (CMK) using the KmsKeyId parameter, in addition to setting Encrypted // to true. For a complete list of possible encryption cases, see Amazon EBS - // Encryption (AWSEC2/latest/UserGuide/EBSEncryption.htm). + // Encryption (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html). // // Encrypted Amazon EBS volumes may only be attached to instances that support // Amazon EBS encryption. For more information, see Supported Instance Types @@ -43211,6 +43386,8 @@ type DeleteFlowLogsInput struct { // One or more flow log IDs. // + // Constraint: Maximum of 1000 flow log IDs. + // // FlowLogIds is a required field FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list" required:"true"` } @@ -48123,6 +48300,8 @@ type DescribeFlowLogsInput struct { Filter []*Filter `locationNameList:"Filter" type:"list"` // One or more flow log IDs. + // + // Constraint: Maximum of 1000 flow log IDs. 
FlowLogIds []*string `locationName:"FlowLogId" locationNameList:"item" type:"list"` // The maximum number of results to return with a single call. To retrieve the @@ -66211,7 +66390,7 @@ type InstanceNetworkInterfaceSpecification struct { // // If you are not creating an EFA, specify interface or omit this parameter. // - // Valide values: interface | efa + // Valid values: interface | efa InterfaceType *string `type:"string"` // A number of IPv6 addresses to assign to the network interface. Amazon EC2 @@ -66395,6 +66574,39 @@ func (s *InstancePrivateIpAddress) SetPrivateIpAddress(v string) *InstancePrivat return s } +// The instance details to specify which volumes should be snapshotted. +type InstanceSpecification struct { + _ struct{} `type:"structure"` + + // Excludes the root volume from being snapshotted. + ExcludeBootVolume *bool `type:"boolean"` + + // The instance to specify which volumes should be snapshotted. + InstanceId *string `type:"string"` +} + +// String returns the string representation +func (s InstanceSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceSpecification) GoString() string { + return s.String() +} + +// SetExcludeBootVolume sets the ExcludeBootVolume field's value. +func (s *InstanceSpecification) SetExcludeBootVolume(v bool) *InstanceSpecification { + s.ExcludeBootVolume = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *InstanceSpecification) SetInstanceId(v string) *InstanceSpecification { + s.InstanceId = &v + return s +} + // Describes the current state of an instance. type InstanceState struct { _ struct{} `type:"structure"` @@ -68253,7 +68465,13 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { // The IDs of one or more security groups. Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` - // The type of networking interface. 
+ // The type of network interface. To create an Elastic Fabric Adapter (EFA), + // specify efa. For more information, see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // If you are not creating an EFA, specify interface or omit this parameter. + // + // Valid values: interface | efa InterfaceType *string `type:"string"` // The number of IPv6 addresses to assign to a network interface. Amazon EC2 @@ -75448,7 +75666,10 @@ type RegisterImageInput struct { // PV AMI can make instances launched from the AMI unreachable. EnaSupport *bool `locationName:"enaSupport" type:"boolean"` - // The full path to your AMI manifest in Amazon S3 storage. + // The full path to your AMI manifest in Amazon S3 storage. The specified bucket + // must have the aws-exec-read canned access control list (ACL) to ensure that + // it can be accessed by Amazon EC2. For more information, see Canned ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) + // in the Amazon S3 Service Developer Guide. ImageLocation *string `type:"string"` // The ID of the kernel. @@ -82569,6 +82790,113 @@ func (s *SnapshotDiskContainer) SetUserBucket(v *UserBucket) *SnapshotDiskContai return s } +// Object that contains information about a snapshot. +type SnapshotInfo struct { + _ struct{} `type:"structure"` + + // Description specified by the CreateSnapshotRequest that has been applied + // to all snapshots. + Description *string `locationName:"description" type:"string"` + + // Boolean that specifies whether or not this snapshot is encrypted. + Encrypted *bool `locationName:"encrypted" type:"boolean"` + + // Account id used when creating this snapshot. + OwnerId *string `locationName:"ownerId" type:"string"` + + // Progress this snapshot has made towards completing. + Progress *string `locationName:"progress" type:"string"` + + // Snapshot id that can be used to describe this snapshot. 
+ SnapshotId *string `locationName:"snapshotId" type:"string"` + + // Time this snapshot was started. This is the same for all snapshots initiated + // by the same request. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` + + // Current state of the snapshot. + State *string `locationName:"state" type:"string" enum:"SnapshotState"` + + // Tags associated with this snapshot. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // Source volume from which this snapshot was created. + VolumeId *string `locationName:"volumeId" type:"string"` + + // Size of the volume from which this snapshot was created. + VolumeSize *int64 `locationName:"volumeSize" type:"integer"` +} + +// String returns the string representation +func (s SnapshotInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SnapshotInfo) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *SnapshotInfo) SetDescription(v string) *SnapshotInfo { + s.Description = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *SnapshotInfo) SetEncrypted(v bool) *SnapshotInfo { + s.Encrypted = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *SnapshotInfo) SetOwnerId(v string) *SnapshotInfo { + s.OwnerId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *SnapshotInfo) SetProgress(v string) *SnapshotInfo { + s.Progress = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *SnapshotInfo) SetSnapshotId(v string) *SnapshotInfo { + s.SnapshotId = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *SnapshotInfo) SetStartTime(v time.Time) *SnapshotInfo { + s.StartTime = &v + return s +} + +// SetState sets the State field's value. 
+func (s *SnapshotInfo) SetState(v string) *SnapshotInfo { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SnapshotInfo) SetTags(v []*Tag) *SnapshotInfo { + s.Tags = v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *SnapshotInfo) SetVolumeId(v string) *SnapshotInfo { + s.VolumeId = &v + return s +} + +// SetVolumeSize sets the VolumeSize field's value. +func (s *SnapshotInfo) SetVolumeSize(v int64) *SnapshotInfo { + s.VolumeSize = &v + return s +} + // Details about the import snapshot task. type SnapshotTaskDetail struct { _ struct{} `type:"structure"` @@ -89324,6 +89652,11 @@ const ( ConversionTaskStateCompleted = "completed" ) +const ( + // CopyTagsFromSourceVolume is a CopyTagsFromSource enum value + CopyTagsFromSourceVolume = "volume" +) + const ( // CurrencyCodeValuesUsd is a CurrencyCodeValues enum value CurrencyCodeValuesUsd = "USD" @@ -90100,6 +90433,27 @@ const ( // InstanceTypeI3Metal is a InstanceType enum value InstanceTypeI3Metal = "i3.metal" + // InstanceTypeI3enLarge is a InstanceType enum value + InstanceTypeI3enLarge = "i3en.large" + + // InstanceTypeI3enXlarge is a InstanceType enum value + InstanceTypeI3enXlarge = "i3en.xlarge" + + // InstanceTypeI3en2xlarge is a InstanceType enum value + InstanceTypeI3en2xlarge = "i3en.2xlarge" + + // InstanceTypeI3en3xlarge is a InstanceType enum value + InstanceTypeI3en3xlarge = "i3en.3xlarge" + + // InstanceTypeI3en6xlarge is a InstanceType enum value + InstanceTypeI3en6xlarge = "i3en.6xlarge" + + // InstanceTypeI3en12xlarge is a InstanceType enum value + InstanceTypeI3en12xlarge = "i3en.12xlarge" + + // InstanceTypeI3en24xlarge is a InstanceType enum value + InstanceTypeI3en24xlarge = "i3en.24xlarge" + // InstanceTypeHi14xlarge is a InstanceType enum value InstanceTypeHi14xlarge = "hi1.4xlarge" diff --git a/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go b/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go index 
b26b05b49bb..28b8752e5dd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go @@ -3,6 +3,7 @@ package kafka import ( + "fmt" "time" "github.com/aws/aws-sdk-go/aws" @@ -387,6 +388,97 @@ func (c *Kafka) DescribeClusterWithContext(ctx aws.Context, input *DescribeClust return out, req.Send() } +const opDescribeClusterOperation = "DescribeClusterOperation" + +// DescribeClusterOperationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusterOperation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeClusterOperation for more information on using the DescribeClusterOperation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeClusterOperationRequest method. 
+// req, resp := client.DescribeClusterOperationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DescribeClusterOperation +func (c *Kafka) DescribeClusterOperationRequest(input *DescribeClusterOperationInput) (req *request.Request, output *DescribeClusterOperationOutput) { + op := &request.Operation{ + Name: opDescribeClusterOperation, + HTTPMethod: "GET", + HTTPPath: "/v1/operations/{clusterOperationArn}", + } + + if input == nil { + input = &DescribeClusterOperationInput{} + } + + output = &DescribeClusterOperationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeClusterOperation API operation for Managed Streaming for Kafka. +// +// Returns a description of the cluster operation specified by the ARN. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation DescribeClusterOperation for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DescribeClusterOperation +func (c *Kafka) DescribeClusterOperation(input *DescribeClusterOperationInput) (*DescribeClusterOperationOutput, error) { + req, out := c.DescribeClusterOperationRequest(input) + return out, req.Send() +} + +// DescribeClusterOperationWithContext is the same as DescribeClusterOperation with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeClusterOperation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) DescribeClusterOperationWithContext(ctx aws.Context, input *DescribeClusterOperationInput, opts ...request.Option) (*DescribeClusterOperationOutput, error) { + req, out := c.DescribeClusterOperationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeConfiguration = "DescribeConfiguration" // DescribeConfigurationRequest generates a "aws/request.Request" representing the @@ -666,37 +758,37 @@ func (c *Kafka) GetBootstrapBrokersWithContext(ctx aws.Context, input *GetBootst return out, req.Send() } -const opListClusters = "ListClusters" +const opListClusterOperations = "ListClusterOperations" -// ListClustersRequest generates a "aws/request.Request" representing the -// client's request for the ListClusters operation. The "output" return +// ListClusterOperationsRequest generates a "aws/request.Request" representing the +// client's request for the ListClusterOperations operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListClusters for more information on using the ListClusters +// See ListClusterOperations for more information on using the ListClusterOperations // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListClustersRequest method. -// req, resp := client.ListClustersRequest(params) +// // Example sending a request using the ListClusterOperationsRequest method. +// req, resp := client.ListClusterOperationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusters -func (c *Kafka) ListClustersRequest(input *ListClustersInput) (req *request.Request, output *ListClustersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusterOperations +func (c *Kafka) ListClusterOperationsRequest(input *ListClusterOperationsInput) (req *request.Request, output *ListClusterOperationsOutput) { op := &request.Operation{ - Name: opListClusters, + Name: opListClusterOperations, HTTPMethod: "GET", - HTTPPath: "/v1/clusters", + HTTPPath: "/v1/clusters/{clusterArn}/operations", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, @@ -706,24 +798,25 @@ func (c *Kafka) ListClustersRequest(input *ListClustersInput) (req *request.Requ } if input == nil { - input = &ListClustersInput{} + input = &ListClusterOperationsInput{} } - output = &ListClustersOutput{} + output = &ListClusterOperationsOutput{} req = c.newRequest(op, input, output) return } -// ListClusters API operation for Managed Streaming for Kafka. 
+// ListClusterOperations API operation for Managed Streaming for Kafka. // -// Returns a list of clusters in an account. +// Returns a list of all the operations that have been performed on the specified +// MSK cluster. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation ListClusters for usage and error information. +// API operation ListClusterOperations for usage and error information. // // Returned Error Codes: // * ErrCodeBadRequestException "BadRequestException" @@ -738,65 +831,65 @@ func (c *Kafka) ListClustersRequest(input *ListClustersInput) (req *request.Requ // * ErrCodeForbiddenException "ForbiddenException" // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusters -func (c *Kafka) ListClusters(input *ListClustersInput) (*ListClustersOutput, error) { - req, out := c.ListClustersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusterOperations +func (c *Kafka) ListClusterOperations(input *ListClusterOperationsInput) (*ListClusterOperationsOutput, error) { + req, out := c.ListClusterOperationsRequest(input) return out, req.Send() } -// ListClustersWithContext is the same as ListClusters with the addition of +// ListClusterOperationsWithContext is the same as ListClusterOperations with the addition of // the ability to pass a context and additional request options. // -// See ListClusters for details on how to use this API operation. +// See ListClusterOperations for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Kafka) ListClustersWithContext(ctx aws.Context, input *ListClustersInput, opts ...request.Option) (*ListClustersOutput, error) { - req, out := c.ListClustersRequest(input) +func (c *Kafka) ListClusterOperationsWithContext(ctx aws.Context, input *ListClusterOperationsInput, opts ...request.Option) (*ListClusterOperationsOutput, error) { + req, out := c.ListClusterOperationsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListClustersPages iterates over the pages of a ListClusters operation, +// ListClusterOperationsPages iterates over the pages of a ListClusterOperations operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListClusters method for more information on how to use this operation. +// See ListClusterOperations method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListClusters operation. +// // Example iterating over at most 3 pages of a ListClusterOperations operation. 
// pageNum := 0 -// err := client.ListClustersPages(params, -// func(page *kafka.ListClustersOutput, lastPage bool) bool { +// err := client.ListClusterOperationsPages(params, +// func(page *kafka.ListClusterOperationsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Kafka) ListClustersPages(input *ListClustersInput, fn func(*ListClustersOutput, bool) bool) error { - return c.ListClustersPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Kafka) ListClusterOperationsPages(input *ListClusterOperationsInput, fn func(*ListClusterOperationsOutput, bool) bool) error { + return c.ListClusterOperationsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListClustersPagesWithContext same as ListClustersPages except +// ListClusterOperationsPagesWithContext same as ListClusterOperationsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Kafka) ListClustersPagesWithContext(ctx aws.Context, input *ListClustersInput, fn func(*ListClustersOutput, bool) bool, opts ...request.Option) error { +func (c *Kafka) ListClusterOperationsPagesWithContext(ctx aws.Context, input *ListClusterOperationsInput, fn func(*ListClusterOperationsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListClustersInput + var inCpy *ListClusterOperationsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListClustersRequest(inCpy) + req, _ := c.ListClusterOperationsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) 
return req, nil @@ -805,42 +898,42 @@ func (c *Kafka) ListClustersPagesWithContext(ctx aws.Context, input *ListCluster cont := true for p.Next() && cont { - cont = fn(p.Page().(*ListClustersOutput), !p.HasNextPage()) + cont = fn(p.Page().(*ListClusterOperationsOutput), !p.HasNextPage()) } return p.Err() } -const opListConfigurations = "ListConfigurations" +const opListClusters = "ListClusters" -// ListConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListConfigurations operation. The "output" return +// ListClustersRequest generates a "aws/request.Request" representing the +// client's request for the ListClusters operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListConfigurations for more information on using the ListConfigurations +// See ListClusters for more information on using the ListClusters // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListConfigurationsRequest method. -// req, resp := client.ListConfigurationsRequest(params) +// // Example sending a request using the ListClustersRequest method. 
+// req, resp := client.ListClustersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListConfigurations -func (c *Kafka) ListConfigurationsRequest(input *ListConfigurationsInput) (req *request.Request, output *ListConfigurationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusters +func (c *Kafka) ListClustersRequest(input *ListClustersInput) (req *request.Request, output *ListClustersOutput) { op := &request.Operation{ - Name: opListConfigurations, + Name: opListClusters, HTTPMethod: "GET", - HTTPPath: "/v1/configurations", + HTTPPath: "/v1/clusters", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, @@ -850,100 +943,97 @@ func (c *Kafka) ListConfigurationsRequest(input *ListConfigurationsInput) (req * } if input == nil { - input = &ListConfigurationsInput{} + input = &ListClustersInput{} } - output = &ListConfigurationsOutput{} + output = &ListClustersOutput{} req = c.newRequest(op, input, output) return } -// ListConfigurations API operation for Managed Streaming for Kafka. +// ListClusters API operation for Managed Streaming for Kafka. // -// Returns a list of all the MSK configurations in this Region for this account. +// Returns a list of all the MSK clusters in the current Region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation ListConfigurations for usage and error information. +// API operation ListClusters for usage and error information. // // Returned Error Codes: -// * ErrCodeServiceUnavailableException "ServiceUnavailableException" -// Returns information about an error. 
-// // * ErrCodeBadRequestException "BadRequestException" // Returns information about an error. // -// * ErrCodeUnauthorizedException "UnauthorizedException" +// * ErrCodeInternalServerErrorException "InternalServerErrorException" // Returns information about an error. // -// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// * ErrCodeUnauthorizedException "UnauthorizedException" // Returns information about an error. // // * ErrCodeForbiddenException "ForbiddenException" // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListConfigurations -func (c *Kafka) ListConfigurations(input *ListConfigurationsInput) (*ListConfigurationsOutput, error) { - req, out := c.ListConfigurationsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusters +func (c *Kafka) ListClusters(input *ListClustersInput) (*ListClustersOutput, error) { + req, out := c.ListClustersRequest(input) return out, req.Send() } -// ListConfigurationsWithContext is the same as ListConfigurations with the addition of +// ListClustersWithContext is the same as ListClusters with the addition of // the ability to pass a context and additional request options. // -// See ListConfigurations for details on how to use this API operation. +// See ListClusters for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Kafka) ListConfigurationsWithContext(ctx aws.Context, input *ListConfigurationsInput, opts ...request.Option) (*ListConfigurationsOutput, error) { - req, out := c.ListConfigurationsRequest(input) +func (c *Kafka) ListClustersWithContext(ctx aws.Context, input *ListClustersInput, opts ...request.Option) (*ListClustersOutput, error) { + req, out := c.ListClustersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListConfigurationsPages iterates over the pages of a ListConfigurations operation, +// ListClustersPages iterates over the pages of a ListClusters operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListConfigurations method for more information on how to use this operation. +// See ListClusters method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListConfigurations operation. +// // Example iterating over at most 3 pages of a ListClusters operation. 
// pageNum := 0 -// err := client.ListConfigurationsPages(params, -// func(page *kafka.ListConfigurationsOutput, lastPage bool) bool { +// err := client.ListClustersPages(params, +// func(page *kafka.ListClustersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Kafka) ListConfigurationsPages(input *ListConfigurationsInput, fn func(*ListConfigurationsOutput, bool) bool) error { - return c.ListConfigurationsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Kafka) ListClustersPages(input *ListClustersInput, fn func(*ListClustersOutput, bool) bool) error { + return c.ListClustersPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListConfigurationsPagesWithContext same as ListConfigurationsPages except +// ListClustersPagesWithContext same as ListClustersPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Kafka) ListConfigurationsPagesWithContext(ctx aws.Context, input *ListConfigurationsInput, fn func(*ListConfigurationsOutput, bool) bool, opts ...request.Option) error { +func (c *Kafka) ListClustersPagesWithContext(ctx aws.Context, input *ListClustersInput, fn func(*ListClustersOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListConfigurationsInput + var inCpy *ListClustersInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListConfigurationsRequest(inCpy) + req, _ := c.ListClustersRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) 
return req, nil @@ -952,42 +1042,42 @@ func (c *Kafka) ListConfigurationsPagesWithContext(ctx aws.Context, input *ListC cont := true for p.Next() && cont { - cont = fn(p.Page().(*ListConfigurationsOutput), !p.HasNextPage()) + cont = fn(p.Page().(*ListClustersOutput), !p.HasNextPage()) } return p.Err() } -const opListNodes = "ListNodes" +const opListConfigurationRevisions = "ListConfigurationRevisions" -// ListNodesRequest generates a "aws/request.Request" representing the -// client's request for the ListNodes operation. The "output" return +// ListConfigurationRevisionsRequest generates a "aws/request.Request" representing the +// client's request for the ListConfigurationRevisions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListNodes for more information on using the ListNodes +// See ListConfigurationRevisions for more information on using the ListConfigurationRevisions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListNodesRequest method. -// req, resp := client.ListNodesRequest(params) +// // Example sending a request using the ListConfigurationRevisionsRequest method. 
+// req, resp := client.ListConfigurationRevisionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListNodes -func (c *Kafka) ListNodesRequest(input *ListNodesInput) (req *request.Request, output *ListNodesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListConfigurationRevisions +func (c *Kafka) ListConfigurationRevisionsRequest(input *ListConfigurationRevisionsInput) (req *request.Request, output *ListConfigurationRevisionsOutput) { op := &request.Operation{ - Name: opListNodes, + Name: opListConfigurationRevisions, HTTPMethod: "GET", - HTTPPath: "/v1/clusters/{clusterArn}/nodes", + HTTPPath: "/v1/configurations/{arn}/revisions", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, @@ -997,30 +1087,30 @@ func (c *Kafka) ListNodesRequest(input *ListNodesInput) (req *request.Request, o } if input == nil { - input = &ListNodesInput{} + input = &ListConfigurationRevisionsInput{} } - output = &ListNodesOutput{} + output = &ListConfigurationRevisionsOutput{} req = c.newRequest(op, input, output) return } -// ListNodes API operation for Managed Streaming for Kafka. +// ListConfigurationRevisions API operation for Managed Streaming for Kafka. // -// Returns a list of the broker nodes in the cluster. +// Returns a list of all the MSK configurations in this Region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation ListNodes for usage and error information. +// API operation ListConfigurationRevisions for usage and error information. 
// // Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" +// * ErrCodeBadRequestException "BadRequestException" // Returns information about an error. // -// * ErrCodeBadRequestException "BadRequestException" +// * ErrCodeUnauthorizedException "UnauthorizedException" // Returns information about an error. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" @@ -1029,65 +1119,71 @@ func (c *Kafka) ListNodesRequest(input *ListNodesInput) (req *request.Request, o // * ErrCodeForbiddenException "ForbiddenException" // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListNodes -func (c *Kafka) ListNodes(input *ListNodesInput) (*ListNodesOutput, error) { - req, out := c.ListNodesRequest(input) +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListConfigurationRevisions +func (c *Kafka) ListConfigurationRevisions(input *ListConfigurationRevisionsInput) (*ListConfigurationRevisionsOutput, error) { + req, out := c.ListConfigurationRevisionsRequest(input) return out, req.Send() } -// ListNodesWithContext is the same as ListNodes with the addition of +// ListConfigurationRevisionsWithContext is the same as ListConfigurationRevisions with the addition of // the ability to pass a context and additional request options. // -// See ListNodes for details on how to use this API operation. +// See ListConfigurationRevisions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Kafka) ListNodesWithContext(ctx aws.Context, input *ListNodesInput, opts ...request.Option) (*ListNodesOutput, error) { - req, out := c.ListNodesRequest(input) +func (c *Kafka) ListConfigurationRevisionsWithContext(ctx aws.Context, input *ListConfigurationRevisionsInput, opts ...request.Option) (*ListConfigurationRevisionsOutput, error) { + req, out := c.ListConfigurationRevisionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListNodesPages iterates over the pages of a ListNodes operation, +// ListConfigurationRevisionsPages iterates over the pages of a ListConfigurationRevisions operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListNodes method for more information on how to use this operation. +// See ListConfigurationRevisions method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListNodes operation. +// // Example iterating over at most 3 pages of a ListConfigurationRevisions operation. 
// pageNum := 0 -// err := client.ListNodesPages(params, -// func(page *kafka.ListNodesOutput, lastPage bool) bool { +// err := client.ListConfigurationRevisionsPages(params, +// func(page *kafka.ListConfigurationRevisionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Kafka) ListNodesPages(input *ListNodesInput, fn func(*ListNodesOutput, bool) bool) error { - return c.ListNodesPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Kafka) ListConfigurationRevisionsPages(input *ListConfigurationRevisionsInput, fn func(*ListConfigurationRevisionsOutput, bool) bool) error { + return c.ListConfigurationRevisionsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListNodesPagesWithContext same as ListNodesPages except +// ListConfigurationRevisionsPagesWithContext same as ListConfigurationRevisionsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Kafka) ListNodesPagesWithContext(ctx aws.Context, input *ListNodesInput, fn func(*ListNodesOutput, bool) bool, opts ...request.Option) error { +func (c *Kafka) ListConfigurationRevisionsPagesWithContext(ctx aws.Context, input *ListConfigurationRevisionsInput, fn func(*ListConfigurationRevisionsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListNodesInput + var inCpy *ListConfigurationRevisionsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListNodesRequest(inCpy) + req, _ := c.ListConfigurationRevisionsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) 
return req, nil @@ -1096,268 +1192,800 @@ func (c *Kafka) ListNodesPagesWithContext(ctx aws.Context, input *ListNodesInput cont := true for p.Next() && cont { - cont = fn(p.Page().(*ListNodesOutput), !p.HasNextPage()) + cont = fn(p.Page().(*ListConfigurationRevisionsOutput), !p.HasNextPage()) } return p.Err() } -const opListTagsForResource = "ListTagsForResource" +const opListConfigurations = "ListConfigurations" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// ListConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListConfigurations operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See ListConfigurations for more information on using the ListConfigurations // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example sending a request using the ListConfigurationsRequest method. 
+// req, resp := client.ListConfigurationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListTagsForResource -func (c *Kafka) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListConfigurations +func (c *Kafka) ListConfigurationsRequest(input *ListConfigurationsInput) (req *request.Request, output *ListConfigurationsOutput) { op := &request.Operation{ - Name: opListTagsForResource, + Name: opListConfigurations, HTTPMethod: "GET", - HTTPPath: "/v1/tags/{resourceArn}", + HTTPPath: "/v1/configurations", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &ListTagsForResourceInput{} + input = &ListConfigurationsInput{} } - output = &ListTagsForResourceOutput{} + output = &ListConfigurationsOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for Managed Streaming for Kafka. +// ListConfigurations API operation for Managed Streaming for Kafka. // -// List tags for a resource. +// Returns a list of all the MSK configurations in this Region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation ListTagsForResource for usage and error information. +// API operation ListConfigurations for usage and error information. // // Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" // Returns information about an error. 
// // * ErrCodeBadRequestException "BadRequestException" // Returns information about an error. // -// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListConfigurations +func (c *Kafka) ListConfigurations(input *ListConfigurationsInput) (*ListConfigurationsOutput, error) { + req, out := c.ListConfigurationsRequest(input) + return out, req.Send() +} + +// ListConfigurationsWithContext is the same as ListConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListConfigurationsWithContext(ctx aws.Context, input *ListConfigurationsInput, opts ...request.Option) (*ListConfigurationsOutput, error) { + req, out := c.ListConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListConfigurationsPages iterates over the pages of a ListConfigurations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListConfigurations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a ListConfigurations operation. +// pageNum := 0 +// err := client.ListConfigurationsPages(params, +// func(page *kafka.ListConfigurationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Kafka) ListConfigurationsPages(input *ListConfigurationsInput, fn func(*ListConfigurationsOutput, bool) bool) error { + return c.ListConfigurationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListConfigurationsPagesWithContext same as ListConfigurationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListConfigurationsPagesWithContext(ctx aws.Context, input *ListConfigurationsInput, fn func(*ListConfigurationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListConfigurationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListConfigurationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListConfigurationsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListNodes = "ListNodes" + +// ListNodesRequest generates a "aws/request.Request" representing the +// client's request for the ListNodes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListNodes for more information on using the ListNodes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListNodesRequest method. +// req, resp := client.ListNodesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListNodes +func (c *Kafka) ListNodesRequest(input *ListNodesInput) (req *request.Request, output *ListNodesOutput) { + op := &request.Operation{ + Name: opListNodes, + HTTPMethod: "GET", + HTTPPath: "/v1/clusters/{clusterArn}/nodes", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListNodesInput{} + } + + output = &ListNodesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListNodes API operation for Managed Streaming for Kafka. +// +// Returns a list of the broker nodes in the cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation ListNodes for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListNodes +func (c *Kafka) ListNodes(input *ListNodesInput) (*ListNodesOutput, error) { + req, out := c.ListNodesRequest(input) + return out, req.Send() +} + +// ListNodesWithContext is the same as ListNodes with the addition of +// the ability to pass a context and additional request options. +// +// See ListNodes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListNodesWithContext(ctx aws.Context, input *ListNodesInput, opts ...request.Option) (*ListNodesOutput, error) { + req, out := c.ListNodesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListNodesPages iterates over the pages of a ListNodes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListNodes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListNodes operation. +// pageNum := 0 +// err := client.ListNodesPages(params, +// func(page *kafka.ListNodesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Kafka) ListNodesPages(input *ListNodesInput, fn func(*ListNodesOutput, bool) bool) error { + return c.ListNodesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListNodesPagesWithContext same as ListNodesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListNodesPagesWithContext(ctx aws.Context, input *ListNodesInput, fn func(*ListNodesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListNodesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListNodesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListNodesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. 
+// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListTagsForResource +func (c *Kafka) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/v1/tags/{resourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Managed Streaming for Kafka. +// +// Returns a list of the tags associated with the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListTagsForResource +func (c *Kafka) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. 
+// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/TagResource +func (c *Kafka) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/v1/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Managed Streaming for Kafka. +// +// Adds tags to the specified MSK resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/TagResource +func (c *Kafka) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UntagResource +func (c *Kafka) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/v1/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Managed Streaming for Kafka. +// +// Removes the tags associated with the keys that are provided in the query. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNotFoundException "NotFoundException" // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListTagsForResource -func (c *Kafka) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UntagResource +func (c *Kafka) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// UntagResourceWithContext is the same as UntagResource with the addition of // the ability to pass a context and additional request options. // -// See ListTagsForResource for details on how to use this API operation. +// See UntagResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Kafka) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *Kafka) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +const opUpdateBrokerStorage = "UpdateBrokerStorage" -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// UpdateBrokerStorageRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBrokerStorage operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See UpdateBrokerStorage for more information on using the UpdateBrokerStorage // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// // Example sending a request using the UpdateBrokerStorageRequest method. +// req, resp := client.UpdateBrokerStorageRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/TagResource -func (c *Kafka) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateBrokerStorage +func (c *Kafka) UpdateBrokerStorageRequest(input *UpdateBrokerStorageInput) (req *request.Request, output *UpdateBrokerStorageOutput) { op := &request.Operation{ - Name: opTagResource, - HTTPMethod: "POST", - HTTPPath: "/v1/tags/{resourceArn}", + Name: opUpdateBrokerStorage, + HTTPMethod: "PUT", + HTTPPath: "/v1/clusters/{clusterArn}/nodes/storage", } if input == nil { - input = &TagResourceInput{} + input = &UpdateBrokerStorageInput{} } - output = &TagResourceOutput{} + output = &UpdateBrokerStorageOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// TagResource API operation for Managed Streaming for Kafka. +// UpdateBrokerStorage API operation for Managed Streaming for Kafka. // -// Add tags to a resource +// Updates the EBS storage associated with MSK brokers. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation TagResource for usage and error information. +// API operation UpdateBrokerStorage for usage and error information. // // Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" // Returns information about an error. // // * ErrCodeBadRequestException "BadRequestException" // Returns information about an error. // +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Returns information about an error. +// // * ErrCodeInternalServerErrorException "InternalServerErrorException" // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/TagResource -func (c *Kafka) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateBrokerStorage +func (c *Kafka) UpdateBrokerStorage(input *UpdateBrokerStorageInput) (*UpdateBrokerStorageOutput, error) { + req, out := c.UpdateBrokerStorageRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// UpdateBrokerStorageWithContext is the same as UpdateBrokerStorage with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. +// See UpdateBrokerStorage for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Kafka) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *Kafka) UpdateBrokerStorageWithContext(ctx aws.Context, input *UpdateBrokerStorageInput, opts ...request.Option) (*UpdateBrokerStorageOutput, error) { + req, out := c.UpdateBrokerStorageRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUntagResource = "UntagResource" +const opUpdateClusterConfiguration = "UpdateClusterConfiguration" -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return +// UpdateClusterConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateClusterConfiguration operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See UpdateClusterConfiguration for more information on using the UpdateClusterConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// // Example sending a request using the UpdateClusterConfigurationRequest method. 
+// req, resp := client.UpdateClusterConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UntagResource -func (c *Kafka) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateClusterConfiguration +func (c *Kafka) UpdateClusterConfigurationRequest(input *UpdateClusterConfigurationInput) (req *request.Request, output *UpdateClusterConfigurationOutput) { op := &request.Operation{ - Name: opUntagResource, - HTTPMethod: "DELETE", - HTTPPath: "/v1/tags/{resourceArn}", + Name: opUpdateClusterConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/v1/clusters/{clusterArn}/configuration", } if input == nil { - input = &UntagResourceInput{} + input = &UpdateClusterConfigurationInput{} } - output = &UntagResourceOutput{} + output = &UpdateClusterConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UntagResource API operation for Managed Streaming for Kafka. +// UpdateClusterConfiguration API operation for Managed Streaming for Kafka. // -// Remove tags from a resource. +// Updates the cluster with the configuration that is specified in the request +// body. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation UntagResource for usage and error information. +// API operation UpdateClusterConfiguration for usage and error information. 
// // Returned Error Codes: -// * ErrCodeNotFoundException "NotFoundException" +// * ErrCodeBadRequestException "BadRequestException" // Returns information about an error. // -// * ErrCodeBadRequestException "BadRequestException" +// * ErrCodeUnauthorizedException "UnauthorizedException" // Returns information about an error. // // * ErrCodeInternalServerErrorException "InternalServerErrorException" // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UntagResource -func (c *Kafka) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// * ErrCodeNotFoundException "NotFoundException" +// Returns information about an error. +// +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateClusterConfiguration +func (c *Kafka) UpdateClusterConfiguration(input *UpdateClusterConfigurationInput) (*UpdateClusterConfigurationOutput, error) { + req, out := c.UpdateClusterConfigurationRequest(input) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of +// UpdateClusterConfigurationWithContext is the same as UpdateClusterConfiguration with the addition of // the ability to pass a context and additional request options. // -// See UntagResource for details on how to use this API operation. +// See UpdateClusterConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Kafka) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +func (c *Kafka) UpdateClusterConfigurationWithContext(ctx aws.Context, input *UpdateClusterConfigurationInput, opts ...request.Option) (*UpdateClusterConfigurationOutput, error) { + req, out := c.UpdateClusterConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } +// Specifies the EBS volume upgrade information. The broker identifier must +// be set to the keyword ALL. This means the changes apply to all the brokers +// in the cluster. +type BrokerEBSVolumeInfo struct { + _ struct{} `type:"structure"` + + // The ID of the broker to update. + // + // KafkaBrokerNodeId is a required field + KafkaBrokerNodeId *string `locationName:"kafkaBrokerNodeId" type:"string" required:"true"` + + // Size of the EBS volume to update. + // + // VolumeSizeGB is a required field + VolumeSizeGB *int64 `locationName:"volumeSizeGB" type:"integer" required:"true"` +} + +// String returns the string representation +func (s BrokerEBSVolumeInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BrokerEBSVolumeInfo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BrokerEBSVolumeInfo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BrokerEBSVolumeInfo"} + if s.KafkaBrokerNodeId == nil { + invalidParams.Add(request.NewErrParamRequired("KafkaBrokerNodeId")) + } + if s.VolumeSizeGB == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeSizeGB")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKafkaBrokerNodeId sets the KafkaBrokerNodeId field's value. 
+func (s *BrokerEBSVolumeInfo) SetKafkaBrokerNodeId(v string) *BrokerEBSVolumeInfo { + s.KafkaBrokerNodeId = &v + return s +} + +// SetVolumeSizeGB sets the VolumeSizeGB field's value. +func (s *BrokerEBSVolumeInfo) SetVolumeSizeGB(v int64) *BrokerEBSVolumeInfo { + s.VolumeSizeGB = &v + return s +} + // Describes the setup to be used for Kafka broker nodes in the cluster. type BrokerNodeGroupInfo struct { _ struct{} `type:"structure"` @@ -1382,7 +2010,8 @@ type BrokerNodeGroupInfo struct { // The AWS security groups to associate with the elastic network interfaces // in order to specify who can connect to and communicate with the Amazon MSK - // cluster. + // cluster. If you don't specify a security group, Amazon MSK uses the default + // security group associated with the VPC. SecurityGroups []*string `locationName:"securityGroups" type:"list"` // Contains information about storage volumes attached to MSK broker nodes. @@ -1472,6 +2101,9 @@ type BrokerNodeInfo struct { // Information about the version of software currently deployed on the Kafka // brokers in the cluster. CurrentBrokerSoftwareInfo *BrokerSoftwareInfo `locationName:"currentBrokerSoftwareInfo" type:"structure"` + + // Endpoints for accessing the broker. + Endpoints []*string `locationName:"endpoints" type:"list"` } // String returns the string representation @@ -1514,14 +2146,22 @@ func (s *BrokerNodeInfo) SetCurrentBrokerSoftwareInfo(v *BrokerSoftwareInfo) *Br return s } +// SetEndpoints sets the Endpoints field's value. +func (s *BrokerNodeInfo) SetEndpoints(v []*string) *BrokerNodeInfo { + s.Endpoints = v + return s +} + // Information about the current software installed on the cluster. type BrokerSoftwareInfo struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the configuration used for the cluster. + // This field isn't visible in this preview release. ConfigurationArn *string `locationName:"configurationArn" type:"string"` - // The revision of the configuration to use. 
+ // The revision of the configuration to use. This field isn't visible in this + // preview release. ConfigurationRevision *int64 `locationName:"configurationRevision" type:"long"` // The version of Apache Kafka. @@ -1556,13 +2196,43 @@ func (s *BrokerSoftwareInfo) SetKafkaVersion(v string) *BrokerSoftwareInfo { return s } +// Includes all client authentication information. +type ClientAuthentication struct { + _ struct{} `type:"structure"` + + // Details for ClientAuthentication using TLS. + Tls *Tls `locationName:"tls" type:"structure"` +} + +// String returns the string representation +func (s ClientAuthentication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientAuthentication) GoString() string { + return s.String() +} + +// SetTls sets the Tls field's value. +func (s *ClientAuthentication) SetTls(v *Tls) *ClientAuthentication { + s.Tls = v + return s +} + // Returns information about a cluster. type ClusterInfo struct { _ struct{} `type:"structure"` + // Arn of active cluster operation. + ActiveOperationArn *string `locationName:"activeOperationArn" type:"string"` + // Information about the broker nodes. BrokerNodeGroupInfo *BrokerNodeGroupInfo `locationName:"brokerNodeGroupInfo" type:"structure"` + // Includes all client authentication information. + ClientAuthentication *ClientAuthentication `locationName:"clientAuthentication" type:"structure"` + // The Amazon Resource Name (ARN) that uniquely identifies the cluster. ClusterArn *string `locationName:"clusterArn" type:"string"` @@ -1583,15 +2253,20 @@ type ClusterInfo struct { EncryptionInfo *EncryptionInfo `locationName:"encryptionInfo" type:"structure"` // Specifies which metrics are gathered for the MSK cluster. This property has - // three possible values: DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER. + // three possible values: DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER. 
For + // a list of the metrics associated with each of these three levels of monitoring, + // see Monitoring (https://docs.aws.amazon.com/msk/latest/developerguide/monitoring.html). EnhancedMonitoring *string `locationName:"enhancedMonitoring" type:"string" enum:"EnhancedMonitoring"` - // The number of Kafka broker nodes in the cluster. + // The number of broker nodes in the cluster. NumberOfBrokerNodes *int64 `locationName:"numberOfBrokerNodes" type:"integer"` // The state of the cluster. The possible states are CREATING, ACTIVE, and FAILED. State *string `locationName:"state" type:"string" enum:"ClusterState"` + // Tags attached to the cluster. + Tags map[string]*string `locationName:"tags" type:"map"` + // The connection string to use to connect to the Apache ZooKeeper cluster. ZookeeperConnectString *string `locationName:"zookeeperConnectString" type:"string"` } @@ -1606,12 +2281,24 @@ func (s ClusterInfo) GoString() string { return s.String() } +// SetActiveOperationArn sets the ActiveOperationArn field's value. +func (s *ClusterInfo) SetActiveOperationArn(v string) *ClusterInfo { + s.ActiveOperationArn = &v + return s +} + // SetBrokerNodeGroupInfo sets the BrokerNodeGroupInfo field's value. func (s *ClusterInfo) SetBrokerNodeGroupInfo(v *BrokerNodeGroupInfo) *ClusterInfo { s.BrokerNodeGroupInfo = v return s } +// SetClientAuthentication sets the ClientAuthentication field's value. +func (s *ClusterInfo) SetClientAuthentication(v *ClientAuthentication) *ClusterInfo { + s.ClientAuthentication = v + return s +} + // SetClusterArn sets the ClusterArn field's value. func (s *ClusterInfo) SetClusterArn(v string) *ClusterInfo { s.ClusterArn = &v @@ -1666,12 +2353,123 @@ func (s *ClusterInfo) SetState(v string) *ClusterInfo { return s } +// SetTags sets the Tags field's value. +func (s *ClusterInfo) SetTags(v map[string]*string) *ClusterInfo { + s.Tags = v + return s +} + // SetZookeeperConnectString sets the ZookeeperConnectString field's value. 
func (s *ClusterInfo) SetZookeeperConnectString(v string) *ClusterInfo { s.ZookeeperConnectString = &v return s } +// Returns information about a cluster operation. +type ClusterOperationInfo struct { + _ struct{} `type:"structure"` + + // The ID of the API request that triggered this operation. + ClientRequestId *string `locationName:"clientRequestId" type:"string"` + + // ARN of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The time at which operation was created. + CreationTime *time.Time `locationName:"creationTime" type:"timestamp" timestampFormat:"iso8601"` + + // The time at which the operation finished. + EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + + // Describes the error if the operation fails. + ErrorInfo *ErrorInfo `locationName:"errorInfo" type:"structure"` + + // ARN of the cluster operation. + OperationArn *string `locationName:"operationArn" type:"string"` + + // State of the cluster operation. + OperationState *string `locationName:"operationState" type:"string"` + + // Type of the cluster operation. + OperationType *string `locationName:"operationType" type:"string"` + + // Information about cluster attributes before a cluster is updated. + SourceClusterInfo *MutableClusterInfo `locationName:"sourceClusterInfo" type:"structure"` + + // Information about cluster attributes after a cluster is updated. + TargetClusterInfo *MutableClusterInfo `locationName:"targetClusterInfo" type:"structure"` +} + +// String returns the string representation +func (s ClusterOperationInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterOperationInfo) GoString() string { + return s.String() +} + +// SetClientRequestId sets the ClientRequestId field's value. 
+func (s *ClusterOperationInfo) SetClientRequestId(v string) *ClusterOperationInfo { + s.ClientRequestId = &v + return s +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *ClusterOperationInfo) SetClusterArn(v string) *ClusterOperationInfo { + s.ClusterArn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ClusterOperationInfo) SetCreationTime(v time.Time) *ClusterOperationInfo { + s.CreationTime = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *ClusterOperationInfo) SetEndTime(v time.Time) *ClusterOperationInfo { + s.EndTime = &v + return s +} + +// SetErrorInfo sets the ErrorInfo field's value. +func (s *ClusterOperationInfo) SetErrorInfo(v *ErrorInfo) *ClusterOperationInfo { + s.ErrorInfo = v + return s +} + +// SetOperationArn sets the OperationArn field's value. +func (s *ClusterOperationInfo) SetOperationArn(v string) *ClusterOperationInfo { + s.OperationArn = &v + return s +} + +// SetOperationState sets the OperationState field's value. +func (s *ClusterOperationInfo) SetOperationState(v string) *ClusterOperationInfo { + s.OperationState = &v + return s +} + +// SetOperationType sets the OperationType field's value. +func (s *ClusterOperationInfo) SetOperationType(v string) *ClusterOperationInfo { + s.OperationType = &v + return s +} + +// SetSourceClusterInfo sets the SourceClusterInfo field's value. +func (s *ClusterOperationInfo) SetSourceClusterInfo(v *MutableClusterInfo) *ClusterOperationInfo { + s.SourceClusterInfo = v + return s +} + +// SetTargetClusterInfo sets the TargetClusterInfo field's value. +func (s *ClusterOperationInfo) SetTargetClusterInfo(v *MutableClusterInfo) *ClusterOperationInfo { + s.TargetClusterInfo = v + return s +} + // Represents an MSK Configuration. 
type Configuration struct { _ struct{} `type:"structure"` @@ -1689,10 +2487,14 @@ type Configuration struct { // Description is a required field Description *string `locationName:"description" type:"string" required:"true"` + // An array of the versions of Apache Kafka with which you can use this MSK + // configuration. You can use this configuration for an MSK cluster only if + // the Apache Kafka version specified for the cluster appears in this array. + // // KafkaVersions is a required field KafkaVersions []*string `locationName:"kafkaVersions" type:"list" required:"true"` - // Describes a configuration revision. + // Latest revision of the configuration. // // LatestRevision is a required field LatestRevision *ConfigurationRevision `locationName:"latestRevision" type:"structure" required:"true"` @@ -1749,7 +2551,7 @@ func (s *Configuration) SetName(v string) *Configuration { return s } -// Specifies the Kafka configuration to use for the brokers. +// Specifies the configuration to use for the brokers. type ConfigurationInfo struct { _ struct{} `type:"structure"` @@ -1857,12 +2659,16 @@ type CreateClusterInput struct { // BrokerNodeGroupInfo is a required field BrokerNodeGroupInfo *BrokerNodeGroupInfo `locationName:"brokerNodeGroupInfo" type:"structure" required:"true"` + // Includes all client authentication related information. + ClientAuthentication *ClientAuthentication `locationName:"clientAuthentication" type:"structure"` + // The name of the cluster. // // ClusterName is a required field ClusterName *string `locationName:"clusterName" min:"1" type:"string" required:"true"` - // Comprises of the Configuration to be used on Kafka brokers in a cluster. + // Represents the configuration that you want MSK to use for the brokers in + // a cluster. ConfigurationInfo *ConfigurationInfo `locationName:"configurationInfo" type:"structure"` // Includes all encryption-related information. 
@@ -1877,10 +2683,13 @@ type CreateClusterInput struct { // KafkaVersion is a required field KafkaVersion *string `locationName:"kafkaVersion" min:"1" type:"string" required:"true"` - // The number of Kafka broker nodes in the Amazon MSK cluster. + // The number of broker nodes in the cluster. // // NumberOfBrokerNodes is a required field NumberOfBrokerNodes *int64 `locationName:"numberOfBrokerNodes" min:"1" type:"integer" required:"true"` + + // Create tags when creating the cluster. + Tags map[string]*string `locationName:"tags" type:"map"` } // String returns the string representation @@ -1945,6 +2754,12 @@ func (s *CreateClusterInput) SetBrokerNodeGroupInfo(v *BrokerNodeGroupInfo) *Cre return s } +// SetClientAuthentication sets the ClientAuthentication field's value. +func (s *CreateClusterInput) SetClientAuthentication(v *ClientAuthentication) *CreateClusterInput { + s.ClientAuthentication = v + return s +} + // SetClusterName sets the ClusterName field's value. func (s *CreateClusterInput) SetClusterName(v string) *CreateClusterInput { s.ClusterName = &v @@ -1981,6 +2796,12 @@ func (s *CreateClusterInput) SetNumberOfBrokerNodes(v int64) *CreateClusterInput return s } +// SetTags sets the Tags field's value. +func (s *CreateClusterInput) SetTags(v map[string]*string) *CreateClusterInput { + s.Tags = v + return s +} + // Returns information about the created cluster. type CreateClusterOutput struct { _ struct{} `type:"structure"` @@ -2040,11 +2861,6 @@ type CreateConfigurationInput struct { // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` - // Contents of the server.properties file. When using the API, you must ensure - // that the contents of the file are base64 encoded. When using the AWS Management - // Console, the SDK, or the AWS CLI, the contents of server.properties can be - // in plaintext. - // // ServerProperties is automatically base64 encoded/decoded by the SDK. 
// // ServerProperties is a required field @@ -2274,6 +3090,69 @@ func (s *DescribeClusterInput) SetClusterArn(v string) *DescribeClusterInput { return s } +type DescribeClusterOperationInput struct { + _ struct{} `type:"structure"` + + // ClusterOperationArn is a required field + ClusterOperationArn *string `location:"uri" locationName:"clusterOperationArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeClusterOperationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterOperationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeClusterOperationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeClusterOperationInput"} + if s.ClusterOperationArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterOperationArn")) + } + if s.ClusterOperationArn != nil && len(*s.ClusterOperationArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterOperationArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterOperationArn sets the ClusterOperationArn field's value. +func (s *DescribeClusterOperationInput) SetClusterOperationArn(v string) *DescribeClusterOperationInput { + s.ClusterOperationArn = &v + return s +} + +// Information about a cluster operation. 
+type DescribeClusterOperationOutput struct { + _ struct{} `type:"structure"` + + // Cluster operation information + ClusterOperationInfo *ClusterOperationInfo `locationName:"clusterOperationInfo" type:"structure"` +} + +// String returns the string representation +func (s DescribeClusterOperationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeClusterOperationOutput) GoString() string { + return s.String() +} + +// SetClusterOperationInfo sets the ClusterOperationInfo field's value. +func (s *DescribeClusterOperationOutput) SetClusterOperationInfo(v *ClusterOperationInfo) *DescribeClusterOperationOutput { + s.ClusterOperationInfo = v + return s +} + // Returns information about a cluster. type DescribeClusterOutput struct { _ struct{} `type:"structure"` @@ -2470,13 +3349,9 @@ type DescribeConfigurationRevisionOutput struct { // The description of the configuration. Description *string `locationName:"description" type:"string"` + // The revision number. Revision *int64 `locationName:"revision" type:"long"` - // Contents of the server.properties file. When using the API, you must ensure - // that the contents of the file are base64 encoded. When using the AWS Management - // Console, the SDK, or the AWS CLI, the contents of server.properties can be - // in plaintext. - // // ServerProperties is automatically base64 encoded/decoded by the SDK. ServerProperties []byte `locationName:"serverProperties" type:"blob"` } @@ -2559,11 +3434,12 @@ func (s *EBSStorageInfo) SetVolumeSize(v int64) *EBSStorageInfo { return s } -// The data volume encryption details. +// The data-volume encryption details. type EncryptionAtRest struct { _ struct{} `type:"structure"` - // The AWS KMS key used for data encryption. + // The ARN of the AWS KMS key for encrypting data at rest. If you don't specify + // a KMS key, MSK creates one for you and uses it. 
// // DataVolumeKMSKeyId is a required field DataVolumeKMSKeyId *string `locationName:"dataVolumeKMSKeyId" type:"string" required:"true"` @@ -2598,13 +3474,65 @@ func (s *EncryptionAtRest) SetDataVolumeKMSKeyId(v string) *EncryptionAtRest { return s } +// The settings for encrypting data in transit. +type EncryptionInTransit struct { + _ struct{} `type:"structure"` + + // Indicates the encryption setting for data in transit between clients and + // brokers. The following are the possible values. + // + // TLS means that client-broker communication is enabled with TLS only. + // + // TLS_PLAINTEXT means that client-broker communication is enabled for both + // TLS-encrypted, as well as plaintext data. + // + // PLAINTEXT means that client-broker communication is enabled in plaintext + // only. + // + // The default value is TLS_PLAINTEXT. + ClientBroker *string `locationName:"clientBroker" type:"string" enum:"ClientBroker"` + + // When set to true, it indicates that data communication among the broker nodes + // of the cluster is encrypted. When set to false, the communication happens + // in plaintext. + // + // The default value is true. + InCluster *bool `locationName:"inCluster" type:"boolean"` +} + +// String returns the string representation +func (s EncryptionInTransit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionInTransit) GoString() string { + return s.String() +} + +// SetClientBroker sets the ClientBroker field's value. +func (s *EncryptionInTransit) SetClientBroker(v string) *EncryptionInTransit { + s.ClientBroker = &v + return s +} + +// SetInCluster sets the InCluster field's value. +func (s *EncryptionInTransit) SetInCluster(v bool) *EncryptionInTransit { + s.InCluster = &v + return s +} + // Includes encryption-related information, such as the AWS KMS key used for -// encrypting data at rest. 
+// encrypting data at rest and whether you want MSK to encrypt your data in +// transit. type EncryptionInfo struct { _ struct{} `type:"structure"` - // The data volume encryption details. + // The data-volume encryption details. EncryptionAtRest *EncryptionAtRest `locationName:"encryptionAtRest" type:"structure"` + + // The details for encryption in transit. + EncryptionInTransit *EncryptionInTransit `locationName:"encryptionInTransit" type:"structure"` } // String returns the string representation @@ -2638,6 +3566,45 @@ func (s *EncryptionInfo) SetEncryptionAtRest(v *EncryptionAtRest) *EncryptionInf return s } +// SetEncryptionInTransit sets the EncryptionInTransit field's value. +func (s *EncryptionInfo) SetEncryptionInTransit(v *EncryptionInTransit) *EncryptionInfo { + s.EncryptionInTransit = v + return s +} + +// Returns information about an error state of the cluster. +type ErrorInfo struct { + _ struct{} `type:"structure"` + + // A number describing the error programmatically. + ErrorCode *string `locationName:"errorCode" type:"string"` + + // An optional field to provide more details about the error. + ErrorString *string `locationName:"errorString" type:"string"` +} + +// String returns the string representation +func (s ErrorInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorInfo) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *ErrorInfo) SetErrorCode(v string) *ErrorInfo { + s.ErrorCode = &v + return s +} + +// SetErrorString sets the ErrorString field's value. +func (s *ErrorInfo) SetErrorString(v string) *ErrorInfo { + s.ErrorString = &v + return s +} + type GetBootstrapBrokersInput struct { _ struct{} `type:"structure"` @@ -2683,6 +3650,9 @@ type GetBootstrapBrokersOutput struct { // A string containing one or more hostname:port pairs. 
BootstrapBrokerString *string `locationName:"bootstrapBrokerString" type:"string"` + + // A string containing one or more DNS names (or IP) and TLS port pairs. + BootstrapBrokerStringTls *string `locationName:"bootstrapBrokerStringTls" type:"string"` } // String returns the string representation @@ -2701,6 +3671,106 @@ func (s *GetBootstrapBrokersOutput) SetBootstrapBrokerString(v string) *GetBoots return s } +// SetBootstrapBrokerStringTls sets the BootstrapBrokerStringTls field's value. +func (s *GetBootstrapBrokersOutput) SetBootstrapBrokerStringTls(v string) *GetBootstrapBrokersOutput { + s.BootstrapBrokerStringTls = &v + return s +} + +type ListClusterOperationsInput struct { + _ struct{} `type:"structure"` + + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListClusterOperationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListClusterOperationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListClusterOperationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListClusterOperationsInput"} + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterArn sets the ClusterArn field's value. 
+func (s *ListClusterOperationsInput) SetClusterArn(v string) *ListClusterOperationsInput { + s.ClusterArn = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListClusterOperationsInput) SetMaxResults(v int64) *ListClusterOperationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListClusterOperationsInput) SetNextToken(v string) *ListClusterOperationsInput { + s.NextToken = &v + return s +} + +// The response contains an array containing cluster operation information and +// a next token if the response is truncated. +type ListClusterOperationsOutput struct { + _ struct{} `type:"structure"` + + // An array of cluster operation information objects. + ClusterOperationInfoList []*ClusterOperationInfo `locationName:"clusterOperationInfoList" type:"list"` + + // If the response of ListClusterOperations is truncated, it returns a NextToken + // in the response. This Nexttoken should be sent in the subsequent request + // to ListClusterOperations. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListClusterOperationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListClusterOperationsOutput) GoString() string { + return s.String() +} + +// SetClusterOperationInfoList sets the ClusterOperationInfoList field's value. +func (s *ListClusterOperationsOutput) SetClusterOperationInfoList(v []*ClusterOperationInfo) *ListClusterOperationsOutput { + s.ClusterOperationInfoList = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListClusterOperationsOutput) SetNextToken(v string) *ListClusterOperationsOutput { + s.NextToken = &v + return s +} + type ListClustersInput struct { _ struct{} `type:"structure"` @@ -2712,18 +3782,112 @@ type ListClustersInput struct { } // String returns the string representation -func (s ListClustersInput) String() string { +func (s ListClustersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListClustersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListClustersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListClustersInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterNameFilter sets the ClusterNameFilter field's value. +func (s *ListClustersInput) SetClusterNameFilter(v string) *ListClustersInput { + s.ClusterNameFilter = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListClustersInput) SetMaxResults(v int64) *ListClustersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListClustersInput) SetNextToken(v string) *ListClustersInput { + s.NextToken = &v + return s +} + +// The response contains an array containing cluster information and a next +// token if the response is truncated. +type ListClustersOutput struct { + _ struct{} `type:"structure"` + + // Information on each of the MSK clusters in the response. + ClusterInfoList []*ClusterInfo `locationName:"clusterInfoList" type:"list"` + + // The paginated results marker. When the result of a ListClusters operation + // is truncated, the call returns NextToken in the response. To get another + // batch of clusters, provide this token in your next request. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListClustersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListClustersOutput) GoString() string { + return s.String() +} + +// SetClusterInfoList sets the ClusterInfoList field's value. +func (s *ListClustersOutput) SetClusterInfoList(v []*ClusterInfo) *ListClustersOutput { + s.ClusterInfoList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListClustersOutput) SetNextToken(v string) *ListClustersOutput { + s.NextToken = &v + return s +} + +type ListConfigurationRevisionsInput struct { + _ struct{} `type:"structure"` + + // Arn is a required field + Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"` + + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListConfigurationRevisionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListClustersInput) GoString() string { +func (s ListConfigurationRevisionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListClustersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListClustersInput"} +func (s *ListConfigurationRevisionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListConfigurationRevisionsInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } @@ -2734,64 +3898,61 @@ func (s *ListClustersInput) Validate() error { return nil } -// SetClusterNameFilter sets the ClusterNameFilter field's value. -func (s *ListClustersInput) SetClusterNameFilter(v string) *ListClustersInput { - s.ClusterNameFilter = &v +// SetArn sets the Arn field's value. +func (s *ListConfigurationRevisionsInput) SetArn(v string) *ListConfigurationRevisionsInput { + s.Arn = &v return s } // SetMaxResults sets the MaxResults field's value. -func (s *ListClustersInput) SetMaxResults(v int64) *ListClustersInput { +func (s *ListConfigurationRevisionsInput) SetMaxResults(v int64) *ListConfigurationRevisionsInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. -func (s *ListClustersInput) SetNextToken(v string) *ListClustersInput { +func (s *ListConfigurationRevisionsInput) SetNextToken(v string) *ListConfigurationRevisionsInput { s.NextToken = &v return s } -// The response contains an array containing cluster information and a next -// token if the response is truncated. -type ListClustersOutput struct { +// Information about revisions of an MSK configuration. +type ListConfigurationRevisionsOutput struct { _ struct{} `type:"structure"` - // Information on each of the MSK clusters in the response. - ClusterInfoList []*ClusterInfo `locationName:"clusterInfoList" type:"list"` - - // The paginated results marker. 
When the result of a ListClusters operation - // is truncated, the call returns NextToken in the response. To get another - // batch of clusters, provide this token in your next request. + // Paginated results marker. NextToken *string `locationName:"nextToken" type:"string"` + + // List of ConfigurationRevision objects. + Revisions []*ConfigurationRevision `locationName:"revisions" type:"list"` } // String returns the string representation -func (s ListClustersOutput) String() string { +func (s ListConfigurationRevisionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListClustersOutput) GoString() string { +func (s ListConfigurationRevisionsOutput) GoString() string { return s.String() } -// SetClusterInfoList sets the ClusterInfoList field's value. -func (s *ListClustersOutput) SetClusterInfoList(v []*ClusterInfo) *ListClustersOutput { - s.ClusterInfoList = v +// SetNextToken sets the NextToken field's value. +func (s *ListConfigurationRevisionsOutput) SetNextToken(v string) *ListConfigurationRevisionsOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListClustersOutput) SetNextToken(v string) *ListClustersOutput { - s.NextToken = &v +// SetRevisions sets the Revisions field's value. +func (s *ListConfigurationRevisionsOutput) SetRevisions(v []*ConfigurationRevision) *ListConfigurationRevisionsOutput { + s.Revisions = v return s } type ListConfigurationsInput struct { _ struct{} `type:"structure"` - MaxResults *string `location:"querystring" locationName:"maxResults" type:"string"` + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -2806,8 +3967,21 @@ func (s ListConfigurationsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListConfigurationsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetMaxResults sets the MaxResults field's value. -func (s *ListConfigurationsInput) SetMaxResults(v string) *ListConfigurationsInput { +func (s *ListConfigurationsInput) SetMaxResults(v int64) *ListConfigurationsInput { s.MaxResults = &v return s } @@ -2823,6 +3997,7 @@ func (s *ListConfigurationsInput) SetNextToken(v string) *ListConfigurationsInpu type ListConfigurationsOutput struct { _ struct{} `type:"structure"` + // An array of MSK configurations. Configurations []*Configuration `locationName:"configurations" type:"list"` // The paginated results marker. When the result of a ListConfigurations operation @@ -2985,11 +4160,11 @@ func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResource return s } -// List tags for a resource +// Response of listing tags for a resource. type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` - // The Key value pairs indicating resource tags. + // The key-value pair for the resource tag. Tags map[string]*string `locationName:"tags" type:"map"` } @@ -3009,6 +4184,48 @@ func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForRe return s } +// Information about cluster attributes that can be updated via update APIs. +type MutableClusterInfo struct { + _ struct{} `type:"structure"` + + // Specifies the size of the EBS volume and the ID of the associated broker. + BrokerEBSVolumeInfo []*BrokerEBSVolumeInfo `locationName:"brokerEBSVolumeInfo" type:"list"` + + // Information about the changes in the configuration of the brokers. 
+ ConfigurationInfo *ConfigurationInfo `locationName:"configurationInfo" type:"structure"` + + // The number of broker nodes in the cluster. + NumberOfBrokerNodes *int64 `locationName:"numberOfBrokerNodes" type:"integer"` +} + +// String returns the string representation +func (s MutableClusterInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MutableClusterInfo) GoString() string { + return s.String() +} + +// SetBrokerEBSVolumeInfo sets the BrokerEBSVolumeInfo field's value. +func (s *MutableClusterInfo) SetBrokerEBSVolumeInfo(v []*BrokerEBSVolumeInfo) *MutableClusterInfo { + s.BrokerEBSVolumeInfo = v + return s +} + +// SetConfigurationInfo sets the ConfigurationInfo field's value. +func (s *MutableClusterInfo) SetConfigurationInfo(v *ConfigurationInfo) *MutableClusterInfo { + s.ConfigurationInfo = v + return s +} + +// SetNumberOfBrokerNodes sets the NumberOfBrokerNodes field's value. +func (s *MutableClusterInfo) SetNumberOfBrokerNodes(v int64) *MutableClusterInfo { + s.NumberOfBrokerNodes = &v + return s +} + // The node information object. type NodeInfo struct { _ struct{} `type:"structure"` @@ -3117,14 +4334,14 @@ func (s *StorageInfo) SetEbsStorageInfo(v *EBSStorageInfo) *StorageInfo { return s } -// Add tags for a resource +// Tag a resource. type TagResourceInput struct { _ struct{} `type:"structure"` // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` - // The Key value pairs indicating resource tags. + // The key-value pair for the resource tag. // // Tags is a required field Tags map[string]*string `locationName:"tags" type:"map" required:"true"` @@ -3185,6 +4402,30 @@ func (s TagResourceOutput) GoString() string { return s.String() } +// Details for client authentication using TLS. +type Tls struct { + _ struct{} `type:"structure"` + + // List of ACM Certificate Authority ARNs. 
+ CertificateAuthorityArnList []*string `locationName:"certificateAuthorityArnList" type:"list"` +} + +// String returns the string representation +func (s Tls) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tls) GoString() string { + return s.String() +} + +// SetCertificateAuthorityArnList sets the CertificateAuthorityArnList field's value. +func (s *Tls) SetCertificateAuthorityArnList(v []*string) *Tls { + s.CertificateAuthorityArnList = v + return s +} + type UntagResourceInput struct { _ struct{} `type:"structure"` @@ -3250,6 +4491,226 @@ func (s UntagResourceOutput) GoString() string { return s.String() } +// Request object for UpdateBrokerStorage. +type UpdateBrokerStorageInput struct { + _ struct{} `type:"structure"` + + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + + // The version of cluster to update from. A successful operation will then generate + // a new version. + // + // CurrentVersion is a required field + CurrentVersion *string `locationName:"currentVersion" type:"string" required:"true"` + + // Describes the target volume size and the ID of the broker to apply the update + // to. + // + // TargetBrokerEBSVolumeInfo is a required field + TargetBrokerEBSVolumeInfo []*BrokerEBSVolumeInfo `locationName:"targetBrokerEBSVolumeInfo" type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateBrokerStorageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBrokerStorageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateBrokerStorageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateBrokerStorageInput"} + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } + if s.CurrentVersion == nil { + invalidParams.Add(request.NewErrParamRequired("CurrentVersion")) + } + if s.TargetBrokerEBSVolumeInfo == nil { + invalidParams.Add(request.NewErrParamRequired("TargetBrokerEBSVolumeInfo")) + } + if s.TargetBrokerEBSVolumeInfo != nil { + for i, v := range s.TargetBrokerEBSVolumeInfo { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetBrokerEBSVolumeInfo", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *UpdateBrokerStorageInput) SetClusterArn(v string) *UpdateBrokerStorageInput { + s.ClusterArn = &v + return s +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *UpdateBrokerStorageInput) SetCurrentVersion(v string) *UpdateBrokerStorageInput { + s.CurrentVersion = &v + return s +} + +// SetTargetBrokerEBSVolumeInfo sets the TargetBrokerEBSVolumeInfo field's value. +func (s *UpdateBrokerStorageInput) SetTargetBrokerEBSVolumeInfo(v []*BrokerEBSVolumeInfo) *UpdateBrokerStorageInput { + s.TargetBrokerEBSVolumeInfo = v + return s +} + +// Response body for UpdateBrokerStorage. +type UpdateBrokerStorageOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The Amazon Resource Name (ARN) of the cluster operation. 
+ ClusterOperationArn *string `locationName:"clusterOperationArn" type:"string"` +} + +// String returns the string representation +func (s UpdateBrokerStorageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBrokerStorageOutput) GoString() string { + return s.String() +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *UpdateBrokerStorageOutput) SetClusterArn(v string) *UpdateBrokerStorageOutput { + s.ClusterArn = &v + return s +} + +// SetClusterOperationArn sets the ClusterOperationArn field's value. +func (s *UpdateBrokerStorageOutput) SetClusterOperationArn(v string) *UpdateBrokerStorageOutput { + s.ClusterOperationArn = &v + return s +} + +// Request body for UpdateClusterConfiguration. +type UpdateClusterConfigurationInput struct { + _ struct{} `type:"structure"` + + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + + // Represents the configuration that you want MSK to use for the brokers in + // a cluster. + // + // ConfigurationInfo is a required field + ConfigurationInfo *ConfigurationInfo `locationName:"configurationInfo" type:"structure" required:"true"` + + // The version of the cluster that needs to be updated. + // + // CurrentVersion is a required field + CurrentVersion *string `locationName:"currentVersion" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateClusterConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateClusterConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateClusterConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateClusterConfigurationInput"} + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } + if s.ConfigurationInfo == nil { + invalidParams.Add(request.NewErrParamRequired("ConfigurationInfo")) + } + if s.CurrentVersion == nil { + invalidParams.Add(request.NewErrParamRequired("CurrentVersion")) + } + if s.ConfigurationInfo != nil { + if err := s.ConfigurationInfo.Validate(); err != nil { + invalidParams.AddNested("ConfigurationInfo", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *UpdateClusterConfigurationInput) SetClusterArn(v string) *UpdateClusterConfigurationInput { + s.ClusterArn = &v + return s +} + +// SetConfigurationInfo sets the ConfigurationInfo field's value. +func (s *UpdateClusterConfigurationInput) SetConfigurationInfo(v *ConfigurationInfo) *UpdateClusterConfigurationInput { + s.ConfigurationInfo = v + return s +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *UpdateClusterConfigurationInput) SetCurrentVersion(v string) *UpdateClusterConfigurationInput { + s.CurrentVersion = &v + return s +} + +// Response body for UpdateClusterConfiguration. +type UpdateClusterConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The Amazon Resource Name (ARN) of the cluster operation. 
+ ClusterOperationArn *string `locationName:"clusterOperationArn" type:"string"` +} + +// String returns the string representation +func (s UpdateClusterConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateClusterConfigurationOutput) GoString() string { + return s.String() +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *UpdateClusterConfigurationOutput) SetClusterArn(v string) *UpdateClusterConfigurationOutput { + s.ClusterArn = &v + return s +} + +// SetClusterOperationArn sets the ClusterOperationArn field's value. +func (s *UpdateClusterConfigurationOutput) SetClusterOperationArn(v string) *UpdateClusterConfigurationOutput { + s.ClusterOperationArn = &v + return s +} + // Zookeeper node information. type ZookeeperNodeInfo struct { _ struct{} `type:"structure"` @@ -3260,6 +4721,9 @@ type ZookeeperNodeInfo struct { // The virtual private cloud (VPC) IP address of the client. ClientVpcIpAddress *string `locationName:"clientVpcIpAddress" type:"string"` + // Endpoints for accessing the ZooKeeper. + Endpoints []*string `locationName:"endpoints" type:"list"` + // The role-specific ID for Zookeeper. ZookeeperId *float64 `locationName:"zookeeperId" type:"double"` @@ -3289,6 +4753,12 @@ func (s *ZookeeperNodeInfo) SetClientVpcIpAddress(v string) *ZookeeperNodeInfo { return s } +// SetEndpoints sets the Endpoints field's value. +func (s *ZookeeperNodeInfo) SetEndpoints(v []*string) *ZookeeperNodeInfo { + s.Endpoints = v + return s +} + // SetZookeeperId sets the ZookeeperId field's value. func (s *ZookeeperNodeInfo) SetZookeeperId(v float64) *ZookeeperNodeInfo { s.ZookeeperId = &v @@ -3310,6 +4780,18 @@ const ( BrokerAZDistributionDefault = "DEFAULT" ) +// Client-broker encryption in transit setting. 
+const ( + // ClientBrokerTls is a ClientBroker enum value + ClientBrokerTls = "TLS" + + // ClientBrokerTlsPlaintext is a ClientBroker enum value + ClientBrokerTlsPlaintext = "TLS_PLAINTEXT" + + // ClientBrokerPlaintext is a ClientBroker enum value + ClientBrokerPlaintext = "PLAINTEXT" +) + // The state of a Kafka cluster. const ( // ClusterStateActive is a ClusterState enum value @@ -3318,6 +4800,9 @@ const ( // ClusterStateCreating is a ClusterState enum value ClusterStateCreating = "CREATING" + // ClusterStateUpdating is a ClusterState enum value + ClusterStateUpdating = "UPDATING" + // ClusterStateDeleting is a ClusterState enum value ClusterStateDeleting = "DELETING" @@ -3326,7 +4811,9 @@ const ( ) // Specifies which metrics are gathered for the MSK cluster. This property has -// three possible values: DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER. +// three possible values: DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER. For +// a list of the metrics associated with each of these three levels of monitoring, +// see Monitoring (https://docs.aws.amazon.com/msk/latest/developerguide/monitoring.html). const ( // EnhancedMonitoringDefault is a EnhancedMonitoring enum value EnhancedMonitoringDefault = "DEFAULT" diff --git a/vendor/github.com/aws/aws-sdk-go/service/kafka/doc.go b/vendor/github.com/aws/aws-sdk-go/service/kafka/doc.go index 2dd7d82b152..bc1d6eed634 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kafka/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kafka/doc.go @@ -3,8 +3,7 @@ // Package kafka provides the client and types for making API // requests to Managed Streaming for Kafka. // -// The operations for managing an Amazon MSK cluster. This is prerelease documentation -// for a service in preview release. It is subject to change +// The operations for managing an Amazon MSK cluster. // // See https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14 for more information on this service. 
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go index 88cc1bc37b4..3f4784369c9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go @@ -1010,6 +1010,11 @@ type PutObjectInput struct { // The bytes to be stored. // + // To use an non-seekable io.Reader for this request wrap the io.Reader with + // "aws.ReadSeekCloser". The SDK will not retry request errors for non-seekable + // readers. This will allow the SDK to send the reader's payload as chunked + // transfer encoding. + // // Body is a required field Body io.ReadSeeker `type:"blob" required:"true"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go index 584671ac6bc..36ffed6be33 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go @@ -2990,7 +2990,7 @@ func (c *RDS) DeleteDBInstanceRequest(input *DeleteDBInstanceInput) (req *reques // // Note that when a DB instance is in a failure state and has a status of failed, // incompatible-restore, or incompatible-network, you can only delete it when -// the SkipFinalSnapshot parameter is set to true. +// you skip creation of the final snapshot with the SkipFinalSnapshot parameter. // // If the specified DB instance is part of an Amazon Aurora DB cluster, you // can't delete the DB instance if both of the following conditions are true: @@ -11085,6 +11085,102 @@ func (c *RDS) RevokeDBSecurityGroupIngressWithContext(ctx aws.Context, input *Re return out, req.Send() } +const opStartActivityStream = "StartActivityStream" + +// StartActivityStreamRequest generates a "aws/request.Request" representing the +// client's request for the StartActivityStream operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartActivityStream for more information on using the StartActivityStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartActivityStreamRequest method. +// req, resp := client.StartActivityStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StartActivityStream +func (c *RDS) StartActivityStreamRequest(input *StartActivityStreamInput) (req *request.Request, output *StartActivityStreamOutput) { + op := &request.Operation{ + Name: opStartActivityStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartActivityStreamInput{} + } + + output = &StartActivityStreamOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartActivityStream API operation for Amazon Relational Database Service. +// +// Starts a database activity stream to monitor activity on the database. For +// more information, see Database Activity Streams (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/DBActivityStreams.html) +// in the Amazon Aurora User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation StartActivityStream for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeInvalidDBInstanceStateFault "InvalidDBInstanceState" +// The DB instance isn't in a valid state. +// +// * ErrCodeInvalidDBClusterStateFault "InvalidDBClusterStateFault" +// The requested operation can't be performed while the cluster is in this state. +// +// * ErrCodeResourceNotFoundFault "ResourceNotFoundFault" +// The specified resource ID was not found. +// +// * ErrCodeDBClusterNotFoundFault "DBClusterNotFoundFault" +// DBClusterIdentifier doesn't refer to an existing DB cluster. +// +// * ErrCodeDBInstanceNotFoundFault "DBInstanceNotFound" +// DBInstanceIdentifier doesn't refer to an existing DB instance. +// +// * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" +// An error occurred accessing an AWS KMS key. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StartActivityStream +func (c *RDS) StartActivityStream(input *StartActivityStreamInput) (*StartActivityStreamOutput, error) { + req, out := c.StartActivityStreamRequest(input) + return out, req.Send() +} + +// StartActivityStreamWithContext is the same as StartActivityStream with the addition of +// the ability to pass a context and additional request options. +// +// See StartActivityStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) StartActivityStreamWithContext(ctx aws.Context, input *StartActivityStreamInput, opts ...request.Option) (*StartActivityStreamOutput, error) { + req, out := c.StartActivityStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opStartDBCluster = "StartDBCluster" // StartDBClusterRequest generates a "aws/request.Request" representing the @@ -11301,6 +11397,101 @@ func (c *RDS) StartDBInstanceWithContext(ctx aws.Context, input *StartDBInstance return out, req.Send() } +const opStopActivityStream = "StopActivityStream" + +// StopActivityStreamRequest generates a "aws/request.Request" representing the +// client's request for the StopActivityStream operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopActivityStream for more information on using the StopActivityStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopActivityStreamRequest method. +// req, resp := client.StopActivityStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StopActivityStream +func (c *RDS) StopActivityStreamRequest(input *StopActivityStreamInput) (req *request.Request, output *StopActivityStreamOutput) { + op := &request.Operation{ + Name: opStopActivityStream, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopActivityStreamInput{} + } + + output = &StopActivityStreamOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopActivityStream API operation for Amazon Relational Database Service. 
+// +// Stops a database activity stream that was started using the AWS console, +// the start-activity-stream AWS CLI command, or the StartActivityStream action. +// +// For more information, see Database Activity Streams (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/DBActivityStreams.html) +// in the Amazon Aurora User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation StopActivityStream for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidDBInstanceStateFault "InvalidDBInstanceState" +// The DB instance isn't in a valid state. +// +// * ErrCodeInvalidDBClusterStateFault "InvalidDBClusterStateFault" +// The requested operation can't be performed while the cluster is in this state. +// +// * ErrCodeResourceNotFoundFault "ResourceNotFoundFault" +// The specified resource ID was not found. +// +// * ErrCodeDBClusterNotFoundFault "DBClusterNotFoundFault" +// DBClusterIdentifier doesn't refer to an existing DB cluster. +// +// * ErrCodeDBInstanceNotFoundFault "DBInstanceNotFound" +// DBInstanceIdentifier doesn't refer to an existing DB instance. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/StopActivityStream +func (c *RDS) StopActivityStream(input *StopActivityStreamInput) (*StopActivityStreamOutput, error) { + req, out := c.StopActivityStreamRequest(input) + return out, req.Send() +} + +// StopActivityStreamWithContext is the same as StopActivityStream with the addition of +// the ability to pass a context and additional request options. +// +// See StopActivityStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) StopActivityStreamWithContext(ctx aws.Context, input *StopActivityStreamInput, opts ...request.Option) (*StopActivityStreamOutput, error) { + req, out := c.StopActivityStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStopDBCluster = "StopDBCluster" // StopDBClusterRequest generates a "aws/request.Request" representing the @@ -11495,8 +11686,81 @@ func (c *RDS) StopDBInstanceWithContext(ctx aws.Context, input *StopDBInstanceIn return out, req.Send() } -// Describes a quota for an AWS account, for example, the number of DB instances -// allowed. +// Describes a quota for an AWS account. +// +// The following are account quotas: +// +// * AllocatedStorage - The total allocated storage per account, in GiB. +// The used value is the total allocated storage in the account, in GiB. +// +// * AuthorizationsPerDBSecurityGroup - The number of ingress rules per DB +// security group. The used value is the highest number of ingress rules +// in a DB security group in the account. Other DB security groups in the +// account might have a lower number of ingress rules. +// +// * CustomEndpointsPerDBCluster - The number of custom endpoints per DB +// cluster. The used value is the highest number of custom endpoints in a +// DB clusters in the account. Other DB clusters in the account might have +// a lower number of custom endpoints. +// +// * DBClusterParameterGroups - The number of DB cluster parameter groups +// per account, excluding default parameter groups. The used value is the +// count of nondefault DB cluster parameter groups in the account. +// +// * DBClusterRoles - The number of associated AWS Identity and Access Management +// (IAM) roles per DB cluster. 
The used value is the highest number of associated +// IAM roles for a DB cluster in the account. Other DB clusters in the account +// might have a lower number of associated IAM roles. +// +// * DBClusters - The number of DB clusters per account. The used value is +// the count of DB clusters in the account. +// +// * DBInstanceRoles - The number of associated IAM roles per DB instance. +// The used value is the highest number of associated IAM roles for a DB +// instance in the account. Other DB instances in the account might have +// a lower number of associated IAM roles. +// +// * DBInstances - The number of DB instances per account. The used value +// is the count of the DB instances in the account. +// +// * DBParameterGroups - The number of DB parameter groups per account, excluding +// default parameter groups. The used value is the count of nondefault DB +// parameter groups in the account. +// +// * DBSecurityGroups - The number of DB security groups (not VPC security +// groups) per account, excluding the default security group. The used value +// is the count of nondefault DB security groups in the account. +// +// * DBSubnetGroups - The number of DB subnet groups per account. The used +// value is the count of the DB subnet groups in the account. +// +// * EventSubscriptions - The number of event subscriptions per account. +// The used value is the count of the event subscriptions in the account. +// +// * ManualSnapshots - The number of manual DB snapshots per account. The +// used value is the count of the manual DB snapshots in the account. +// +// * OptionGroups - The number of DB option groups per account, excluding +// default option groups. The used value is the count of nondefault DB option +// groups in the account. +// +// * ReadReplicasPerMaster - The number of Read Replicas per DB instance. +// The used value is the highest number of Read Replicas for a DB instance +// in the account. 
Other DB instances in the account might have a lower number +// of Read Replicas. +// +// * ReservedDBInstances - The number of reserved DB instances per account. +// The used value is the count of the active reserved DB instances in the +// account. +// +// * SubnetsPerDBSubnetGroup - The number of subnets per DB subnet group. +// The used value is highest number of subnets for a DB subnet group in the +// account. Other DB subnet groups in the account might have a lower number +// of subnets. +// +// For more information, see Limits (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html) +// in the Amazon RDS User Guide and Limits (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_Limits.html) +// in the Amazon Aurora User Guide. type AccountQuota struct { _ struct{} `type:"structure"` @@ -12178,13 +12442,16 @@ type BacktrackDBClusterInput struct { // DBClusterIdentifier is a required field DBClusterIdentifier *string `type:"string" required:"true"` - // A value that, if specified, forces the DB cluster to backtrack when binary - // logging is enabled. Otherwise, an error occurs when binary logging is enabled. + // A value that indicates whether to force the DB cluster to backtrack when + // binary logging is enabled. Otherwise, an error occurs when binary logging + // is enabled. Force *bool `type:"boolean"` - // If BacktrackTo is set to a timestamp earlier than the earliest backtrack - // time, this value backtracks the DB cluster to the earliest possible backtrack - // time. Otherwise, an error occurs. + // A value that indicates whether to backtrack the DB cluster to the earliest + // possible backtrack time when BacktrackTo is set to a timestamp earlier than + // the earliest backtrack time. When this parameter is disabled and BacktrackTo + // is set to a timestamp earlier than the earliest backtrack time, an error + // occurs. 
UseEarliestTimeOnPointInTimeUnavailable *bool `type:"boolean"` } @@ -12594,8 +12861,8 @@ func (s *CopyDBClusterParameterGroupOutput) SetDBClusterParameterGroup(v *DBClus type CopyDBClusterSnapshotInput struct { _ struct{} `type:"structure"` - // True to copy all tags from the source DB cluster snapshot to the target DB - // cluster snapshot, and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the source DB cluster + // snapshot to the target DB cluster snapshot. By default, tags are not copied. CopyTags *bool `type:"boolean"` // DestinationRegion is used for presigning the request to a given region. @@ -12929,8 +13196,8 @@ func (s *CopyDBParameterGroupOutput) SetDBParameterGroup(v *DBParameterGroup) *C type CopyDBSnapshotInput struct { _ struct{} `type:"structure"` - // True to copy all tags from the source DB snapshot to the target DB snapshot, - // and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the source DB snapshot + // to the target DB snapshot. By default, tags are not copied. CopyTags *bool `type:"boolean"` // DestinationRegion is used for presigning the request to a given region. @@ -13532,8 +13799,8 @@ type CreateDBClusterInput struct { // specified CharacterSet. CharacterSetName *string `type:"string"` - // True to copy all tags from the DB cluster to snapshots of the DB cluster, - // and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the DB cluster to snapshots + // of the DB cluster. The default is not to copy them. CopyTagsToSnapshot *bool `type:"boolean"` // The DB cluster identifier. This parameter is stored as a lowercase string. @@ -13573,9 +13840,9 @@ type CreateDBClusterInput struct { // you are creating. DatabaseName *string `type:"string"` - // Indicates whether the DB cluster should have deletion protection enabled. - // The database can't be deleted when this value is set to true. 
The default - // is false. + // A value that indicates whether the DB cluster has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. DeletionProtection *bool `type:"boolean"` // DestinationRegion is used for presigning the request to a given region. @@ -13587,10 +13854,8 @@ type CreateDBClusterInput struct { // in the Amazon Aurora User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. - // - // Default: false + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The name of the database engine to be used for this DB cluster. @@ -13633,7 +13898,7 @@ type CreateDBClusterInput struct { // Amazon RDS will use the encryption key used to encrypt the source. Otherwise, // Amazon RDS will use your default encryption key. // - // * If the StorageEncrypted parameter is true and ReplicationSourceIdentifier + // * If the StorageEncrypted parameter is enabled and ReplicationSourceIdentifier // is not specified, then Amazon RDS will use your default encryption key. // // AWS KMS creates the default encryption key for your AWS account. Your AWS @@ -13752,7 +14017,7 @@ type CreateDBClusterInput struct { // have the same region as the source ARN. SourceRegion *string `type:"string" ignore:"true"` - // Specifies whether the DB cluster is encrypted. + // A value that indicates whether the DB cluster is encrypted. StorageEncrypted *bool `type:"boolean"` // Tags to assign to the DB cluster. 
@@ -14240,9 +14505,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 32768. + // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 32768. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. // // * Magnetic storage (standard): Must be an integer from 5 to 3072. // @@ -14250,9 +14515,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 32768. + // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 32768. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. // // * Magnetic storage (standard): Must be an integer from 5 to 3072. // @@ -14260,9 +14525,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 32768. + // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 32768. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. // // * Magnetic storage (standard): Must be an integer from 5 to 3072. // @@ -14270,9 +14535,9 @@ type CreateDBInstanceInput struct { // // Constraints to the amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 32768. + // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536. 
// - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 32768. + // * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. // // * Magnetic storage (standard): Must be an integer from 10 to 3072. // @@ -14293,10 +14558,9 @@ type CreateDBInstanceInput struct { // from 20 to 1024. AllocatedStorage *int64 `type:"integer"` - // Indicates that minor engine upgrades are applied automatically to the DB - // instance during the maintenance window. - // - // Default: true + // A value that indicates whether minor engine upgrades are applied automatically + // to the DB instance during the maintenance window. By default, minor engine + // upgrades are applied automatically. AutoMinorVersionUpgrade *bool `type:"boolean"` // The Availability Zone (AZ) where the database will be created. For information @@ -14308,8 +14572,8 @@ type CreateDBInstanceInput struct { // // Example: us-east-1d // - // Constraint: The AvailabilityZone parameter can't be specified if the MultiAZ - // parameter is set to true. The specified Availability Zone must be in the + // Constraint: The AvailabilityZone parameter can't be specified if the DB instance + // is a Multi-AZ deployment. The specified Availability Zone must be in the // same AWS Region as the current endpoint. AvailabilityZone *string `type:"string"` @@ -14340,8 +14604,8 @@ type CreateDBInstanceInput struct { // information, see CreateDBCluster. CharacterSetName *string `type:"string"` - // True to copy all tags from the DB instance to snapshots of the DB instance, - // and otherwise false. The default is false. + // A value that indicates whether to copy tags from the DB instance to snapshots + // of the DB instance. By default, tags are not copied. // // Amazon Aurora // @@ -14468,9 +14732,10 @@ type CreateDBInstanceInput struct { // If there is no DB subnet group, then it is a non-VPC DB instance. 
DBSubnetGroupName *string `type:"string"` - // Indicates if the DB instance should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. For more information, see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // A value that indicates whether the DB instance has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. For more information, see Deleting a DB + // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` // For an Amazon RDS DB instance that's running Microsoft SQL Server, this parameter @@ -14491,8 +14756,8 @@ type CreateDBInstanceInput struct { // in the Amazon Relational Database Service User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. // // You can enable IAM database authentication for the following database engines: // @@ -14506,11 +14771,10 @@ type CreateDBInstanceInput struct { // * For MySQL 5.6, minor version 5.6.34 or higher // // * For MySQL 5.7, minor version 5.7.16 or higher - // - // Default: false EnableIAMDatabaseAuthentication *bool `type:"boolean"` - // True to enable Performance Insights for the DB instance, and otherwise false. + // A value that indicates whether to enable Performance Insights for the DB + // instance. // // For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) // in the Amazon Relational Database Service User Guide. 
@@ -14614,10 +14878,10 @@ type CreateDBInstanceInput struct { // Not applicable. The KMS key identifier is managed by the DB cluster. For // more information, see CreateDBCluster. // - // If the StorageEncrypted parameter is true, and you do not specify a value - // for the KmsKeyId parameter, then Amazon RDS will use your default encryption - // key. AWS KMS creates the default encryption key for your AWS account. Your - // AWS account has a different default encryption key for each AWS Region. + // If StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId + // parameter, then Amazon RDS will use your default encryption key. AWS KMS + // creates the default encryption key for your AWS account. Your AWS account + // has a different default encryption key for each AWS Region. KmsKeyId *string `type:"string"` // License model information for this DB instance. @@ -14738,9 +15002,9 @@ type CreateDBInstanceInput struct { // a MonitoringRoleArn value. MonitoringRoleArn *string `type:"string"` - // A value that specifies whether the DB instance is a Multi-AZ deployment. - // You can't set the AvailabilityZone parameter if the MultiAZ parameter is - // set to true. + // A value that indicates whether the DB instance is a Multi-AZ deployment. + // You can't set the AvailabilityZone parameter if the DB instance is a Multi-AZ + // deployment. MultiAZ *bool `type:"boolean"` // Indicates that the DB instance should be associated with the specified option @@ -14754,6 +15018,11 @@ type CreateDBInstanceInput struct { // The AWS KMS key identifier for encryption of Performance Insights data. The // KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the // KMS key alias for the KMS encryption key. + // + // If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon + // RDS uses your default encryption key. AWS KMS creates the default encryption + // key for your AWS account. 
Your AWS account has a different default encryption + // key for each AWS Region. PerformanceInsightsKMSKeyId *string `type:"string"` // The amount of time, in days, to retain Performance Insights data. Valid values @@ -14862,10 +15131,11 @@ type CreateDBInstanceInput struct { // Valid Values: 0 - 15 PromotionTier *int64 `type:"integer"` - // Specifies the accessibility options for the DB instance. A value of true - // specifies an Internet-facing instance with a publicly resolvable DNS name, - // which resolves to a public IP address. A value of false specifies an internal - // instance with a DNS name that resolves to a private IP address. + // A value that indicates whether the DB instance is publicly accessible. When + // the DB instance is publicly accessible, it is an Internet-facing instance + // with a publicly resolvable DNS name, which resolves to a public IP address. + // When the DB instance is not publicly accessible, it is an internal instance + // with a DNS name that resolves to a private IP address. // // Default: The default behavior varies depending on whether DBSubnetGroupName // is specified. @@ -14889,13 +15159,12 @@ type CreateDBInstanceInput struct { // to it, the DB instance is public. PubliclyAccessible *bool `type:"boolean"` - // Specifies whether the DB instance is encrypted. + // A value that indicates whether the DB instance is encrypted. By default, + // it is not encrypted. // // Amazon Aurora // // Not applicable. The encryption for DB instances is managed by the DB cluster. - // - // Default: false StorageEncrypted *bool `type:"boolean"` // Specifies the storage type to be associated with the DB instance. @@ -14904,7 +15173,7 @@ type CreateDBInstanceInput struct { // // If you specify io1, you must also include a value for the Iops parameter. 
// - // Default: io1 if the Iops parameter is specified, otherwise standard + // Default: io1 if the Iops parameter is specified, otherwise gp2 StorageType *string `type:"string"` // Tags to assign to the DB instance. @@ -15259,8 +15528,8 @@ func (s *CreateDBInstanceOutput) SetDBInstance(v *DBInstance) *CreateDBInstanceO type CreateDBInstanceReadReplicaInput struct { _ struct{} `type:"structure"` - // Indicates that minor engine upgrades are applied automatically to the Read - // Replica during the maintenance window. + // A value that indicates whether minor engine upgrades are applied automatically + // to the Read Replica during the maintenance window. // // Default: Inherits from the source DB instance AutoMinorVersionUpgrade *bool `type:"boolean"` @@ -15273,8 +15542,8 @@ type CreateDBInstanceReadReplicaInput struct { // Example: us-east-1d AvailabilityZone *string `type:"string"` - // True to copy all tags from the Read Replica to snapshots of the Read Replica, - // and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the Read Replica to + // snapshots of the Read Replica. By default, tags are not copied. CopyTagsToSnapshot *bool `type:"boolean"` // The compute and memory capacity of the Read Replica, for example, db.m4.large. @@ -15315,9 +15584,10 @@ type CreateDBInstanceReadReplicaInput struct { // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` - // Indicates if the DB instance should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. For more information, see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // A value that indicates whether the DB instance has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. 
For more information, see Deleting a DB + // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` // DestinationRegion is used for presigning the request to a given region. @@ -15329,8 +15599,8 @@ type CreateDBInstanceReadReplicaInput struct { // in the Amazon RDS User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. // // You can enable IAM database authentication for the following database engines // @@ -15339,11 +15609,10 @@ type CreateDBInstanceReadReplicaInput struct { // * For MySQL 5.7, minor version 5.7.16 or higher // // * Aurora MySQL 5.6 or higher - // - // Default: false EnableIAMDatabaseAuthentication *bool `type:"boolean"` - // True to enable Performance Insights for the Read Replica, and otherwise false. + // A value that indicates whether to enable Performance Insights for the Read + // Replica. // // For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) // in the Amazon RDS User Guide. @@ -15389,7 +15658,7 @@ type CreateDBInstanceReadReplicaInput struct { // a MonitoringRoleArn value. MonitoringRoleArn *string `type:"string"` - // Specifies whether the Read Replica is in a Multi-AZ deployment. + // A value that indicates whether the Read Replica is in a Multi-AZ deployment. // // You can create a Read Replica as a Multi-AZ DB instance. RDS creates a standby // of your replica in another Availability Zone for failover support for the @@ -15404,6 +15673,11 @@ type CreateDBInstanceReadReplicaInput struct { // The AWS KMS key identifier for encryption of Performance Insights data. 
The // KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the // KMS key alias for the KMS encryption key. + // + // If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon + // RDS uses your default encryption key. AWS KMS creates the default encryption + // key for your AWS account. Your AWS account has a different default encryption + // key for each AWS Region. PerformanceInsightsKMSKeyId *string `type:"string"` // The amount of time, in days, to retain Performance Insights data. Valid values @@ -15461,11 +15735,12 @@ type CreateDBInstanceReadReplicaInput struct { // class of the DB instance. ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` - // Specifies the accessibility options for the DB instance. A value of true - // specifies an Internet-facing instance with a publicly resolvable DNS name, - // which resolves to a public IP address. A value of false specifies an internal - // instance with a DNS name that resolves to a private IP address. For more - // information, see CreateDBInstance. + // A value that indicates whether the DB instance is publicly accessible. When + // the DB instance is publicly accessible, it is an Internet-facing instance + // with a publicly resolvable DNS name, which resolves to a public IP address. + // When the DB instance is not publicly accessible, it is an internal instance + // with a DNS name that resolves to a private IP address. For more information, + // see CreateDBInstance. PubliclyAccessible *bool `type:"boolean"` // The identifier of the DB instance that will act as the source for the Read @@ -15512,14 +15787,14 @@ type CreateDBInstanceReadReplicaInput struct { // // If you specify io1, you must also include a value for the Iops parameter. // - // Default: io1 if the Iops parameter is specified, otherwise standard + // Default: io1 if the Iops parameter is specified, otherwise gp2 StorageType *string `type:"string"` // A list of tags. 
For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) // in the Amazon RDS User Guide. Tags []*Tag `locationNameList:"Tag" type:"list"` - // A value that specifies that the DB instance class of the DB instance uses + // A value that indicates whether the DB instance class of the DB instance uses // its default processor features. UseDefaultProcessorFeatures *bool `type:"boolean"` @@ -16186,8 +16461,9 @@ func (s *CreateDBSubnetGroupOutput) SetDBSubnetGroup(v *DBSubnetGroup) *CreateDB type CreateEventSubscriptionInput struct { _ struct{} `type:"structure"` - // A Boolean value; set to true to activate the subscription, set to false to - // create the subscription but not active it. + // A value that indicates whether to activate the subscription. If the event + // notification subscription is not activated, the subscription is created but + // not active. Enabled *bool `type:"boolean"` // A list of event categories for a SourceType that you want to subscribe to. @@ -16347,7 +16623,7 @@ type CreateGlobalClusterInput struct { DatabaseName *string `type:"string"` // The deletion protection setting for the new global database. The global database - // can't be deleted when this value is set to true. + // can't be deleted when deletion protection is enabled. DeletionProtection *bool `type:"boolean"` // Provides the name of the database engine to be used for this DB cluster. @@ -16572,6 +16848,22 @@ func (s *CreateOptionGroupOutput) SetOptionGroup(v *OptionGroup) *CreateOptionGr type DBCluster struct { _ struct{} `type:"structure"` + // The name of the Amazon Kinesis data stream used for the database activity + // stream. + ActivityStreamKinesisStreamName *string `type:"string"` + + // The AWS KMS key identifier used for encrypting messages in the database activity + // stream. + ActivityStreamKmsKeyId *string `type:"string"` + + // The mode of the database activity stream. 
Database events such as a change + // or access generate an activity stream event. The database session can handle + // these events either synchronously or asynchronously. + ActivityStreamMode *string `type:"string" enum:"ActivityStreamMode"` + + // The status of the database activity stream. + ActivityStreamStatus *string `type:"string" enum:"ActivityStreamStatus"` + // For all database engines except Amazon Aurora, AllocatedStorage specifies // the allocated storage size in gibibytes (GiB). For Aurora, AllocatedStorage // always returns 1, because Aurora DB cluster storage size is not fixed, but @@ -16655,7 +16947,7 @@ type DBCluster struct { DbClusterResourceId *string `type:"string"` // Indicates if the DB cluster has deletion protection enabled. The database - // can't be deleted when this value is set to true. + // can't be deleted when deletion protection is enabled. DeletionProtection *bool `type:"boolean"` // The earliest time to which a DB cluster can be backtracked. @@ -16689,27 +16981,22 @@ type DBCluster struct { // Specifies the ID that Amazon Route 53 assigns when you create a hosted zone. HostedZoneId *string `type:"string"` - // - // HTTP endpoint functionality is in beta for Aurora Serverless and is subject - // to change. - // - // Value that is true if the HTTP endpoint for an Aurora Serverless DB cluster - // is enabled and false otherwise. + // A value that indicates whether the HTTP endpoint for an Aurora Serverless + // DB cluster is enabled. // // When enabled, the HTTP endpoint provides a connectionless web service API // for running SQL queries on the Aurora Serverless DB cluster. You can also // query your database from inside the RDS console with the query editor. 
// - // For more information about Aurora Serverless, see Using Amazon Aurora Serverless - // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) + // For more information, see Using the Data API for Aurora Serverless (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) // in the Amazon Aurora User Guide. HttpEndpointEnabled *bool `type:"boolean"` - // True if mapping of AWS Identity and Access Management (IAM) accounts to database - // accounts is enabled, and otherwise false. + // A value that indicates whether the mapping of AWS Identity and Access Management + // (IAM) accounts to database accounts is enabled. IAMDatabaseAuthenticationEnabled *bool `type:"boolean"` - // If StorageEncrypted is true, the AWS KMS key identifier for the encrypted + // If StorageEncrypted is enabled, the AWS KMS key identifier for the encrypted // DB cluster. KmsKeyId *string `type:"string"` @@ -16785,6 +17072,30 @@ func (s DBCluster) GoString() string { return s.String() } +// SetActivityStreamKinesisStreamName sets the ActivityStreamKinesisStreamName field's value. +func (s *DBCluster) SetActivityStreamKinesisStreamName(v string) *DBCluster { + s.ActivityStreamKinesisStreamName = &v + return s +} + +// SetActivityStreamKmsKeyId sets the ActivityStreamKmsKeyId field's value. +func (s *DBCluster) SetActivityStreamKmsKeyId(v string) *DBCluster { + s.ActivityStreamKmsKeyId = &v + return s +} + +// SetActivityStreamMode sets the ActivityStreamMode field's value. +func (s *DBCluster) SetActivityStreamMode(v string) *DBCluster { + s.ActivityStreamMode = &v + return s +} + +// SetActivityStreamStatus sets the ActivityStreamStatus field's value. +func (s *DBCluster) SetActivityStreamStatus(v string) *DBCluster { + s.ActivityStreamStatus = &v + return s +} + // SetAllocatedStorage sets the AllocatedStorage field's value. 
func (s *DBCluster) SetAllocatedStorage(v int64) *DBCluster { s.AllocatedStorage = &v @@ -17196,8 +17507,8 @@ type DBClusterMember struct { // Specifies the instance identifier for this member of the DB cluster. DBInstanceIdentifier *string `type:"string"` - // Value that is true if the cluster member is the primary instance for the - // DB cluster and false otherwise. + // A value that indicates whether the cluster member is the primary instance + // for the DB cluster. IsClusterWriter *bool `type:"boolean"` // A value that specifies the order in which an Aurora Replica is promoted to @@ -17964,8 +18275,8 @@ type DBInstance struct { DbiResourceId *string `type:"string"` // Indicates if the DB instance has deletion protection enabled. The database - // can't be deleted when this value is set to true. For more information, see - // Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // can't be deleted when deletion protection is enabled. For more information, + // see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` // The Active Directory Domain membership records associated with the DB instance. @@ -19699,10 +20010,10 @@ type DeleteDBClusterInput struct { DBClusterIdentifier *string `type:"string" required:"true"` // The DB cluster snapshot identifier of the new DB cluster snapshot created - // when SkipFinalSnapshot is set to false. + // when SkipFinalSnapshot is disabled. // - // Specifying this parameter and also setting the SkipFinalShapshot parameter - // to true results in an error. + // Specifying this parameter and also skipping the creation of a final DB cluster + // snapshot with the SkipFinalShapshot parameter results in an error.
// // Constraints: // @@ -19713,14 +20024,14 @@ type DeleteDBClusterInput struct { // * Can't end with a hyphen or contain two consecutive hyphens FinalDBSnapshotIdentifier *string `type:"string"` - // Determines whether a final DB cluster snapshot is created before the DB cluster - // is deleted. If true is specified, no DB cluster snapshot is created. If false - // is specified, a DB cluster snapshot is created before the DB cluster is deleted. + // A value that indicates whether to skip the creation of a final DB cluster + // snapshot before the DB cluster is deleted. If skip is specified, no DB cluster + // snapshot is created. If skip is not specified, a DB cluster snapshot is created + // before the DB cluster is deleted. By default, skip is not specified, and + // the DB cluster snapshot is created. By default, this parameter is disabled. // // You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot - // is false. - // - // Default: false + // is disabled. SkipFinalSnapshot *bool `type:"boolean"` } @@ -19997,15 +20308,15 @@ type DeleteDBInstanceInput struct { DBInstanceIdentifier *string `type:"string" required:"true"` // A value that indicates whether to remove automated backups immediately after - // the DB instance is deleted. This parameter isn't case-sensitive. This parameter - // defaults to true. + // the DB instance is deleted. This parameter isn't case-sensitive. The default + // is to remove automated backups immediately after the DB instance is deleted. DeleteAutomatedBackups *bool `type:"boolean"` - // The DBSnapshotIdentifier of the new DB snapshot created when SkipFinalSnapshot - // is set to false. + // The DBSnapshotIdentifier of the new DBSnapshot created when the SkipFinalSnapshot + // parameter is disabled. // - // Specifying this parameter and also setting the SkipFinalShapshot parameter - // to true results in an error. 
+ // Specifying this parameter and also specifying to skip final DB snapshot creation + // in SkipFinalShapshot results in an error. // // Constraints: // @@ -20018,21 +20329,20 @@ type DeleteDBInstanceInput struct { // * Can't be specified when deleting a Read Replica. FinalDBSnapshotIdentifier *string `type:"string"` - // A value that indicates whether a final DB snapshot is created before the - // DB instance is deleted. If true is specified, no DB snapshot is created. - // If false is specified, a DB snapshot is created before the DB instance is - // deleted. - // - // When a DB instance is in a failure state and has a status of failed, incompatible-restore, - // or incompatible-network, you can only delete it when the SkipFinalSnapshot - // parameter is set to true. + // A value that indicates whether to skip the creation of a final DB snapshot + // before the DB instance is deleted. If skip is specified, no DB snapshot is + // created. If skip is not specified, a DB snapshot is created before the DB + // instance is deleted. By default, skip is not specified, and the DB snapshot + // is created. // - // Specify true when deleting a Read Replica. + // Note that when a DB instance is in a failure state and has a status of 'failed', + // 'incompatible-restore', or 'incompatible-network', it can only be deleted + // when skip is specified. // - // The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot - // is false. + // Specify skip when deleting a Read Replica. // - // Default: false + // The FinalDBSnapshotIdentifier parameter must be specified if skip is not + // specified. SkipFinalSnapshot *bool `type:"boolean"` } @@ -21323,17 +21633,17 @@ type DescribeDBClusterSnapshotsInput struct { // This parameter is not currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` - // True to include manual DB cluster snapshots that are public and can be copied - // or restored by any AWS account, and otherwise false. 
The default is false. - // The default is false. + // A value that indicates whether to include manual DB cluster snapshots that + // are public and can be copied or restored by any AWS account. By default, + // the public snapshots are not included. // // You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute // API action. IncludePublic *bool `type:"boolean"` - // True to include shared manual DB cluster snapshots from other AWS accounts - // that this AWS account has been given permission to copy or restore, and otherwise - // false. The default is false. + // A value that indicates whether to include shared manual DB cluster snapshots + // from other AWS accounts that this AWS account has been given permission to + // copy or restore. By default, these snapshots are not included. // // You can give an AWS account permission to restore a manual DB cluster snapshot // from another AWS account by the ModifyDBClusterSnapshotAttribute API action. @@ -21369,9 +21679,9 @@ type DescribeDBClusterSnapshotsInput struct { // // If you don't specify a SnapshotType value, then both automated and manual // DB cluster snapshots are returned. You can include shared DB cluster snapshots - // with these results by setting the IncludeShared parameter to true. You can - // include public DB cluster snapshots with these results by setting the IncludePublic - // parameter to true. + // with these results by enabling the IncludeShared parameter. You can include + // public DB cluster snapshots with these results by enabling the IncludePublic + // parameter. // // The IncludeShared and IncludePublic parameters don't apply for SnapshotType // values of manual or automated. The IncludePublic parameter doesn't apply @@ -21628,8 +21938,8 @@ type DescribeDBEngineVersionsInput struct { // * If supplied, must match an existing DBParameterGroupFamily. 
DBParameterGroupFamily *string `type:"string"` - // Indicates that only the default version of the specified engine or engine - // and major version combination is returned. + // A value that indicates whether only the default version of the specified + // engine or engine and major version combination is returned. DefaultOnly *bool `type:"boolean"` // The database engine to return. @@ -21643,16 +21953,22 @@ type DescribeDBEngineVersionsInput struct { // This parameter is not currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` - // Whether to include non-available engine versions in the list. The default - // is to list only available engine versions. + // A value that indicates whether to include engine versions that aren't available + // in the list. The default is to list only available engine versions. IncludeAll *bool `type:"boolean"` - // If this parameter is specified and the requested engine supports the CharacterSetName + // A value that indicates whether to list the supported character sets for each + // engine version. + // + // If this parameter is enabled and the requested engine supports the CharacterSetName // parameter for CreateDBInstance, the response includes a list of supported // character sets for each engine version. ListSupportedCharacterSets *bool `type:"boolean"` - // If this parameter is specified and the requested engine supports the TimeZone + // A value that indicates whether to list the supported time zones for each + // engine version. + // + // If this parameter is enabled and the requested engine supports the TimeZone // parameter for CreateDBInstance, the response includes a list of supported // time zones for each engine version. ListSupportedTimezones *bool `type:"boolean"` @@ -22720,16 +23036,17 @@ type DescribeDBSnapshotsInput struct { // This parameter is not currently supported. 
Filters []*Filter `locationNameList:"Filter" type:"list"` - // True to include manual DB snapshots that are public and can be copied or - // restored by any AWS account, and otherwise false. The default is false. + // A value that indicates whether to include manual DB snapshots that + // are public and can be copied or restored by any AWS account. By default, + // the public snapshots are not included. // // You can share a manual DB snapshot as public by using the ModifyDBSnapshotAttribute // API. IncludePublic *bool `type:"boolean"` - // True to include shared manual DB snapshots from other AWS accounts that this - // AWS account has been given permission to copy or restore, and otherwise false. - // The default is false. + // A value that indicates whether to include shared manual DB snapshots + // from other AWS accounts that this AWS account has been given permission to + // copy or restore. By default, these snapshots are not included. // // You can give an AWS account permission to restore a manual DB snapshot from // another AWS account by using the ModifyDBSnapshotAttribute API action. @@ -22770,8 +23087,8 @@ // If you don't specify a SnapshotType value, then both automated and manual // snapshots are returned. Shared and public DB snapshots are not included in // the returned results by default. You can include shared snapshots with these - // results by setting the IncludeShared parameter to true. You can include public - // snapshots with these results by setting the IncludePublic parameter to true. + // results by enabling the IncludeShared parameter. You can include public snapshots + // with these results by enabling the IncludePublic parameter. // // The IncludeShared and IncludePublic parameters don't apply for SnapshotType // values of manual or automated.
The IncludePublic parameter doesn't apply @@ -24031,8 +24348,7 @@ type DescribeOrderableDBInstanceOptionsInput struct { // Constraints: Minimum 20, maximum 100. MaxRecords *int64 `type:"integer"` - // The VPC filter value. Specify this parameter to show only the available VPC - // or non-VPC offerings. + // A value that indicates whether to show only VPC or non-VPC offerings. Vpc *bool `type:"boolean"` } @@ -24308,8 +24624,8 @@ type DescribeReservedDBInstancesInput struct { // Constraints: Minimum 20, maximum 100. MaxRecords *int64 `type:"integer"` - // The Multi-AZ filter value. Specify this parameter to show only those reservations - // matching the specified Multi-AZ parameter. + // A value that indicates whether to show only those reservations that support + // Multi-AZ. MultiAZ *bool `type:"boolean"` // The offering type filter value. Specify this parameter to show only the available @@ -24451,8 +24767,8 @@ type DescribeReservedDBInstancesOfferingsInput struct { // Constraints: Minimum 20, maximum 100. MaxRecords *int64 `type:"integer"` - // The Multi-AZ filter value. Specify this parameter to show only the available - // offerings matching the specified Multi-AZ parameter. + // A value that indicates whether to show only those reservations that support + // Multi-AZ. MultiAZ *bool `type:"boolean"` // The offering type filter value. Specify this parameter to show only the available @@ -25882,7 +26198,7 @@ type ModifyCurrentDBClusterCapacityInput struct { // // Constraints: // - // * Value must be 2, 4, 8, 16, 32, 64, 128, or 256. + // * Value must be 1, 2, 4, 8, 16, 32, 64, 128, or 256. Capacity *int64 `type:"integer"` // The DB cluster identifier for the cluster being modified. 
This parameter @@ -26217,20 +26533,20 @@ func (s *ModifyDBClusterEndpointOutput) SetStatus(v string) *ModifyDBClusterEndp type ModifyDBClusterInput struct { _ struct{} `type:"structure"` - // A value that specifies whether the modifications in this request and any + // A value that indicates whether the modifications in this request and any // pending modifications are asynchronously applied as soon as possible, regardless // of the PreferredMaintenanceWindow setting for the DB cluster. If this parameter - // is set to false, changes to the DB cluster are applied during the next maintenance + // is disabled, changes to the DB cluster are applied during the next maintenance // window. // // The ApplyImmediately parameter only affects the EnableIAMDatabaseAuthentication, - // MasterUserPassword, and NewDBClusterIdentifier values. If you set the ApplyImmediately - // parameter value to false, then changes to the EnableIAMDatabaseAuthentication, + // MasterUserPassword, and NewDBClusterIdentifier values. If the ApplyImmediately + // parameter is disabled, then changes to the EnableIAMDatabaseAuthentication, // MasterUserPassword, and NewDBClusterIdentifier values are applied during // the next maintenance window. All other changes are applied immediately, regardless // of the value of the ApplyImmediately parameter. // - // Default: false + // By default, this parameter is disabled. ApplyImmediately *bool `type:"boolean"` // The target backtrack window, in seconds. To disable backtracking, set this @@ -26258,8 +26574,8 @@ type ModifyDBClusterInput struct { // Logs for a specific DB cluster. CloudwatchLogsExportConfiguration *CloudwatchLogsExportConfiguration `type:"structure"` - // True to copy all tags from the DB cluster to snapshots of the DB cluster, - // and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the DB cluster to snapshots + // of the DB cluster. The default is not to copy them. 
CopyTagsToSnapshot *bool `type:"boolean"` // The DB cluster identifier for the cluster being modified. This parameter @@ -26275,14 +26591,11 @@ type ModifyDBClusterInput struct { // The name of the DB cluster parameter group to use for the DB cluster. DBClusterParameterGroupName *string `type:"string"` - // Indicates if the DB cluster has deletion protection enabled. The database - // can't be deleted when this value is set to true. + // A value that indicates whether the DB cluster has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. DeletionProtection *bool `type:"boolean"` - // - // HTTP endpoint functionality is in beta for Aurora Serverless and is subject - // to change. - // // A value that indicates whether to enable the HTTP endpoint for an Aurora // Serverless DB cluster. By default, the HTTP endpoint is disabled. // @@ -26290,22 +26603,19 @@ type ModifyDBClusterInput struct { // for running SQL queries on the Aurora Serverless DB cluster. You can also // query your database from inside the RDS console with the query editor. // - // For more information about Aurora Serverless, see Using Amazon Aurora Serverless - // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) + // For more information, see Using the Data API for Aurora Serverless (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) // in the Amazon Aurora User Guide. EnableHttpEndpoint *bool `type:"boolean"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. - // - // Default: false + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. 
EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The version number of the database engine to which you want to upgrade. Changing // this parameter results in an outage. The change is applied during the next - // maintenance window unless the ApplyImmediately parameter is set to true. + // maintenance window unless ApplyImmediately is enabled. // - // For a list of valid engine versions, use the DescribeDBEngineVersions action. + // For a list of valid engine versions, use DescribeDBEngineVersions. EngineVersion *string `type:"string"` // The new password for the master database user. This password can contain @@ -26331,10 +26641,10 @@ type ModifyDBClusterInput struct { // A value that indicates that the DB cluster should be associated with the // specified option group. Changing this parameter doesn't result in an outage // except in the following case, and the change is applied during the next maintenance - // window unless the ApplyImmediately parameter is set to true for this request. - // If the parameter change results in an option group that enables OEM, this - // change can cause a brief (sub-second) period during which new connections - // are rejected but existing connections are not interrupted. + // window unless the ApplyImmediately is enabled for this request. If the parameter + // change results in an option group that enables OEM, this change can cause + // a brief (sub-second) period during which new connections are rejected but + // existing connections are not interrupted. // // Permanent options can't be removed from an option group. The option group // can't be removed from a DB cluster once it is associated with a DB cluster. @@ -26734,36 +27044,34 @@ type ModifyDBInstanceInput struct { // For the valid values for allocated storage for each engine, see CreateDBInstance. AllocatedStorage *int64 `type:"integer"` - // Indicates that major version upgrades are allowed. 
Changing this parameter - // doesn't result in an outage and the change is asynchronously applied as soon - // as possible. + // A value that indicates whether major version upgrades are allowed. Changing + // this parameter doesn't result in an outage and the change is asynchronously + // applied as soon as possible. // - // Constraints: This parameter must be set to true when specifying a value for - // the EngineVersion parameter that is a different major version than the DB - // instance's current version. + // Constraints: Major version upgrades must be allowed when specifying a value + // for the EngineVersion parameter that is a different major version than the + // DB instance's current version. AllowMajorVersionUpgrade *bool `type:"boolean"` - // Specifies whether the modifications in this request and any pending modifications - // are asynchronously applied as soon as possible, regardless of the PreferredMaintenanceWindow - // setting for the DB instance. - // - // If this parameter is set to false, changes to the DB instance are applied - // during the next maintenance window. Some parameter changes can cause an outage - // and are applied on the next call to RebootDBInstance, or the next failure - // reboot. Review the table of parameters in Modifying a DB Instance and Using - // the Apply Immediately Parameter (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html) - // in the Amazon RDS User Guide. to see the impact that setting ApplyImmediately - // to true or false has for each modified parameter and to determine when the - // changes are applied. - // - // Default: false + // A value that indicates whether the modifications in this request and any + // pending modifications are asynchronously applied as soon as possible, regardless + // of the PreferredMaintenanceWindow setting for the DB instance. By default, + // this parameter is disabled. 
+ // + // If this parameter is disabled, changes to the DB instance are applied during + // the next maintenance window. Some parameter changes can cause an outage and + // are applied on the next call to RebootDBInstance, or the next failure reboot. + // Review the table of parameters in Modifying a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html) + // in the Amazon RDS User Guide. to see the impact of enabling or disabling + // ApplyImmediately for each modified parameter and to determine when the changes + // are applied. ApplyImmediately *bool `type:"boolean"` - // Indicates that minor version upgrades are applied automatically to the DB - // instance during the maintenance window. Changing this parameter doesn't result - // in an outage except in the following case and the change is asynchronously - // applied as soon as possible. An outage will result if this parameter is set - // to true during the maintenance window, and a newer minor version is available, + // A value that indicates whether minor version upgrades are applied automatically + // to the DB instance during the maintenance window. Changing this parameter + // doesn't result in an outage except in the following case and the change is + // asynchronously applied as soon as possible. An outage results if this parameter + // is enabled during the maintenance window, and a newer minor version is available, // and RDS has enabled auto patching for that engine version. AutoMinorVersionUpgrade *bool `type:"boolean"` @@ -26773,10 +27081,9 @@ type ModifyDBInstanceInput struct { // // Changing this parameter can result in an outage if you change from 0 to a // non-zero value or from a non-zero value to 0. These changes are applied during - // the next maintenance window unless the ApplyImmediately parameter is set - // to true for this request. 
If you change the parameter from one non-zero value - // to another non-zero value, the change is asynchronously applied as soon as - // possible. + // the next maintenance window unless the ApplyImmediately parameter is enabled + // for this request. If you change the parameter from one non-zero value to + // another non-zero value, the change is asynchronously applied as soon as possible. // // Amazon Aurora // @@ -26809,8 +27116,8 @@ type ModifyDBInstanceInput struct { // has no effect. CloudwatchLogsExportConfiguration *CloudwatchLogsExportConfiguration `type:"structure"` - // True to copy all tags from the DB instance to snapshots of the DB instance, - // and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the DB instance to snapshots + // of the DB instance. By default, tags are not copied. // // Amazon Aurora // @@ -26827,7 +27134,7 @@ type ModifyDBInstanceInput struct { // // If you modify the DB instance class, an outage occurs during the change. // The change is applied during the next maintenance window, unless ApplyImmediately - // is specified as true for this request. + // is enabled for this request. // // Default: Uses existing setting DBInstanceClass *string `type:"string"` @@ -26918,17 +27225,17 @@ type ModifyDBInstanceInput struct { // in the Amazon RDS User Guide. // // Changing the subnet group causes an outage during the change. The change - // is applied during the next maintenance window, unless you specify true for - // the ApplyImmediately parameter. + // is applied during the next maintenance window, unless you enable ApplyImmediately. // // Constraints: If supplied, must match the name of an existing DBSubnetGroup. // // Example: mySubnetGroup DBSubnetGroupName *string `type:"string"` - // Indicates if the DB instance has deletion protection enabled. The database - // can't be deleted when this value is set to true. 
For more information, see - // Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // A value that indicates whether the DB instance has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. For more information, see Deleting a DB + // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` // The Active Directory Domain to move the instance to. Specify none to remove @@ -26940,8 +27247,8 @@ type ModifyDBInstanceInput struct { // The name of the IAM role to use when making API calls to the Directory Service. DomainIAMRoleName *string `type:"string"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. // // You can enable IAM database authentication for the following database engines // @@ -26955,11 +27262,10 @@ type ModifyDBInstanceInput struct { // * For MySQL 5.6, minor version 5.6.34 or higher // // * For MySQL 5.7, minor version 5.7.16 or higher - // - // Default: false EnableIAMDatabaseAuthentication *bool `type:"boolean"` - // True to enable Performance Insights for the DB instance, and otherwise false. + // A value that indicates whether to enable Performance Insights for the DB + // instance. // // For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) // in the Amazon Relational Database Service User Guide. @@ -26967,7 +27273,7 @@ type ModifyDBInstanceInput struct { // The version number of the database engine to upgrade to. 
Changing this parameter // results in an outage and the change is applied during the next maintenance - // window unless the ApplyImmediately parameter is set to true for this request. + // window unless the ApplyImmediately parameter is enabled for this request. // // For major version upgrades, if a nondefault DB parameter group is currently // in use, a new DB parameter group in the DB parameter group family for the @@ -26982,9 +27288,9 @@ type ModifyDBInstanceInput struct { // // Changing this setting doesn't result in an outage and the change is applied // during the next maintenance window unless the ApplyImmediately parameter - // is set to true for this request. If you are migrating from Provisioned IOPS - // to standard storage, set this value to 0. The DB instance will require a - // reboot for the change in storage type to take effect. + // is enabled for this request. If you are migrating from Provisioned IOPS to + // standard storage, set this value to 0. The DB instance will require a reboot + // for the change in storage type to take effect. // // If you choose to migrate your DB instance from using standard storage to // using Provisioned IOPS, or from using Provisioned IOPS to using standard @@ -27072,16 +27378,17 @@ type ModifyDBInstanceInput struct { // a MonitoringRoleArn value. MonitoringRoleArn *string `type:"string"` - // Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter - // doesn't result in an outage and the change is applied during the next maintenance - // window unless the ApplyImmediately parameter is set to true for this request. + // A value that indicates whether the DB instance is a Multi-AZ deployment. + // Changing this parameter doesn't result in an outage and the change is applied + // during the next maintenance window unless the ApplyImmediately parameter + // is enabled for this request. 
MultiAZ *bool `type:"boolean"` // The new DB instance identifier for the DB instance when renaming a DB instance. - // When you change the DB instance identifier, an instance reboot will occur - // immediately if you set Apply Immediately to true, or will occur during the - // next maintenance window if Apply Immediately to false. This value is stored - // as a lowercase string. + // When you change the DB instance identifier, an instance reboot occurs immediately + // if you enable ApplyImmediately, or will occur during the next maintenance + // window if you disable Apply Immediately. This value is stored as a lowercase + // string. // // Constraints: // @@ -27097,8 +27404,8 @@ type ModifyDBInstanceInput struct { // Indicates that the DB instance should be associated with the specified option // group. Changing this parameter doesn't result in an outage except in the // following case and the change is applied during the next maintenance window - // unless the ApplyImmediately parameter is set to true for this request. If - // the parameter change results in an option group that enables OEM, this change + // unless the ApplyImmediately parameter is enabled for this request. If the + // parameter change results in an option group that enables OEM, this change // can cause a brief (sub-second) period during which new connections are rejected // but existing connections are not interrupted. // @@ -27110,6 +27417,11 @@ type ModifyDBInstanceInput struct { // The AWS KMS key identifier for encryption of Performance Insights data. The // KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the // KMS key alias for the KMS encryption key. + // + // If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon + // RDS uses your default encryption key. AWS KMS creates the default encryption + // key for your AWS account. Your AWS account has a different default encryption + // key for each AWS Region. 
PerformanceInsightsKMSKeyId *string `type:"string"` // The amount of time, in days, to retain Performance Insights data. Valid values @@ -27169,20 +27481,18 @@ type ModifyDBInstanceInput struct { // Valid Values: 0 - 15 PromotionTier *int64 `type:"integer"` - // Boolean value that indicates if the DB instance has a publicly resolvable - // DNS name. Set to True to make the DB instance Internet-facing with a publicly - // resolvable DNS name, which resolves to a public IP address. Set to False - // to make the DB instance internal with a DNS name that resolves to a private - // IP address. + // A value that indicates whether the DB instance is publicly accessible. When + // the DB instance is publicly accessible, it is an Internet-facing instance + // with a publicly resolvable DNS name, which resolves to a public IP address. + // When the DB instance is not publicly accessible, it is an internal instance + // with a DNS name that resolves to a private IP address. // // PubliclyAccessible only applies to DB instances in a VPC. The DB instance - // must be part of a public subnet and PubliclyAccessible must be true in order - // for it to be publicly accessible. + // must be part of a public subnet and PubliclyAccessible must be enabled for + // it to be publicly accessible. // // Changes to the PubliclyAccessible parameter are applied immediately regardless // of the value of the ApplyImmediately parameter. - // - // Default: false PubliclyAccessible *bool `type:"boolean"` // Specifies the storage type to be associated with the DB instance. @@ -27205,7 +27515,7 @@ type ModifyDBInstanceInput struct { // // Valid values: standard | gp2 | io1 // - // Default: io1 if the Iops parameter is specified, otherwise standard + // Default: io1 if the Iops parameter is specified, otherwise gp2 StorageType *string `type:"string"` // The ARN from the key store with which to associate the instance for TDE encryption. 
@@ -27215,7 +27525,7 @@ type ModifyDBInstanceInput struct { // device. TdeCredentialPassword *string `type:"string"` - // A value that specifies that the DB instance class of the DB instance uses + // A value that indicates whether the DB instance class of the DB instance uses // its default processor features. UseDefaultProcessorFeatures *bool `type:"boolean"` @@ -27898,7 +28208,7 @@ func (s *ModifyDBSubnetGroupOutput) SetDBSubnetGroup(v *DBSubnetGroup) *ModifyDB type ModifyEventSubscriptionInput struct { _ struct{} `type:"structure"` - // A Boolean value; set to true to activate the subscription. + // A value that indicates whether to activate the subscription. Enabled *bool `type:"boolean"` // A list of event categories for a SourceType that you want to subscribe to. @@ -28007,7 +28317,8 @@ type ModifyGlobalClusterInput struct { _ struct{} `type:"structure"` // Indicates if the global database cluster has deletion protection enabled. - // The global database cluster can't be deleted when this value is set to true. + // The global database cluster can't be deleted when deletion protection is + // enabled. DeletionProtection *bool `type:"boolean"` // The DB cluster identifier for the global cluster being modified. This parameter @@ -28087,8 +28398,9 @@ func (s *ModifyGlobalClusterOutput) SetGlobalCluster(v *GlobalCluster) *ModifyGl type ModifyOptionGroupInput struct { _ struct{} `type:"structure"` - // Indicates whether the changes should be applied immediately, or during the - // next maintenance window for each instance associated with the option group. + // A value that indicates whether to apply the change immediately or during + // the next maintenance window for each instance associated with the option + // group. ApplyImmediately *bool `type:"boolean"` // The name of the option group to be modified. 
@@ -29905,10 +30217,11 @@ type RebootDBInstanceInput struct { // DBInstanceIdentifier is a required field DBInstanceIdentifier *string `type:"string" required:"true"` - // When true, the reboot is conducted through a MultiAZ failover. + // A value that indicates whether the reboot is conducted through a Multi-AZ + // failover. // - // Constraint: You can't specify true if the instance is not configured for - // MultiAZ. + // Constraint: You can't enable force failover if the instance is not configured + // for Multi-AZ. ForceFailover *bool `type:"boolean"` } @@ -30635,12 +30948,12 @@ type ResetDBClusterParameterGroupInput struct { // A list of parameter names in the DB cluster parameter group to reset to the // default values. You can't use this parameter if the ResetAllParameters parameter - // is set to true. + // is enabled. Parameters []*Parameter `locationNameList:"Parameter" type:"list"` - // A value that is set to true to reset all parameters in the DB cluster parameter - // group to their default values, and false otherwise. You can't use this parameter - // if there is a list of parameter names specified for the Parameters parameter. + // A value that indicates whether to reset all parameters in the DB cluster + // parameter group to their default values. You can't use this parameter if + // there is a list of parameter names specified for the Parameters parameter. ResetAllParameters *bool `type:"boolean"` } @@ -30723,10 +31036,9 @@ type ResetDBParameterGroupInput struct { // Valid Values (for Apply method): pending-reboot Parameters []*Parameter `locationNameList:"Parameter" type:"list"` - // Specifies whether (true) or not (false) to reset all parameters in the DB - // parameter group to default values. - // - // Default: true + // A value that indicates whether to reset all parameters in the DB parameter + // group to default values. By default, all parameters in the DB parameter group + // are reset to default values. 
ResetAllParameters *bool `type:"boolean"` } @@ -30837,8 +31149,8 @@ type RestoreDBClusterFromS3Input struct { // with the specified CharacterSet. CharacterSetName *string `type:"string"` - // True to copy all tags from the restored DB cluster to snapshots of the restored - // DB cluster, and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the restored DB cluster + // to snapshots of the restored DB cluster. The default is not to copy them. CopyTagsToSnapshot *bool `type:"boolean"` // The name of the DB cluster to create from the source data in the Amazon S3 @@ -30875,9 +31187,9 @@ type RestoreDBClusterFromS3Input struct { // The database name for the restored DB cluster. DatabaseName *string `type:"string"` - // Indicates if the DB cluster should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. + // A value that indicates whether the DB cluster has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. DeletionProtection *bool `type:"boolean"` // The list of logs that the restored DB cluster is to export to CloudWatch @@ -30886,10 +31198,8 @@ type RestoreDBClusterFromS3Input struct { // in the Amazon Aurora User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. - // - // Default: false + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The name of the database engine to be used for the restored DB cluster. 
@@ -30917,7 +31227,7 @@ type RestoreDBClusterFromS3Input struct { // the KMS encryption key used to encrypt the new DB cluster, then you can use // the KMS key alias instead of the ARN for the KM encryption key. // - // If the StorageEncrypted parameter is true, and you do not specify a value + // If the StorageEncrypted parameter is enabled, and you do not specify a value // for the KmsKeyId parameter, then Amazon RDS will use your default encryption // key. AWS KMS creates the default encryption key for your AWS account. Your // AWS account has a different default encryption key for each AWS Region. @@ -31028,7 +31338,7 @@ type RestoreDBClusterFromS3Input struct { // SourceEngineVersion is a required field SourceEngineVersion *string `type:"string" required:"true"` - // Specifies whether the restored DB cluster is encrypted. + // A value that indicates whether the restored DB cluster is encrypted. StorageEncrypted *bool `type:"boolean"` // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) @@ -31301,8 +31611,8 @@ type RestoreDBClusterFromSnapshotInput struct { // hours). BacktrackWindow *int64 `type:"long"` - // True to copy all tags from the restored DB cluster to snapshots of the restored - // DB cluster, and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the restored DB cluster + // to snapshots of the restored DB cluster. The default is not to copy them. CopyTagsToSnapshot *bool `type:"boolean"` // The name of the DB cluster to create from the DB snapshot or DB cluster snapshot. @@ -31347,9 +31657,9 @@ type RestoreDBClusterFromSnapshotInput struct { // The database name for the restored DB cluster. DatabaseName *string `type:"string"` - // Indicates if the DB cluster should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. 
+ // A value that indicates whether the DB cluster has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. DeletionProtection *bool `type:"boolean"` // The list of logs that the restored DB cluster is to export to Amazon CloudWatch @@ -31358,10 +31668,8 @@ type RestoreDBClusterFromSnapshotInput struct { // in the Amazon Aurora User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. - // - // Default: false + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The database engine to use for the new DB cluster. @@ -31622,8 +31930,8 @@ type RestoreDBClusterToPointInTimeInput struct { // hours). BacktrackWindow *int64 `type:"long"` - // True to copy all tags from the restored DB cluster to snapshots of the restored - // DB cluster, and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the restored DB cluster + // to snapshots of the restored DB cluster. The default is not to copy them. CopyTagsToSnapshot *bool `type:"boolean"` // The name of the new DB cluster to be created. @@ -31662,9 +31970,9 @@ type RestoreDBClusterToPointInTimeInput struct { // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` - // Indicates if the DB cluster should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. + // A value that indicates whether the DB cluster has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. 
DeletionProtection *bool `type:"boolean"` // The list of logs that the restored DB cluster is to export to CloudWatch @@ -31673,10 +31981,8 @@ type RestoreDBClusterToPointInTimeInput struct { // in the Amazon Aurora User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. - // - // Default: false + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The AWS KMS key identifier to use when restoring an encrypted DB cluster @@ -31725,9 +32031,9 @@ type RestoreDBClusterToPointInTimeInput struct { // // * Must be specified if UseLatestRestorableTime parameter is not provided // - // * Can't be specified if UseLatestRestorableTime parameter is true + // * Can't be specified if the UseLatestRestorableTime parameter is enabled // - // * Can't be specified if RestoreType parameter is copy-on-write + // * Can't be specified if the RestoreType parameter is copy-on-write // // Example: 2015-03-07T23:45:00Z RestoreToTime *time.Time `type:"timestamp"` @@ -31761,10 +32067,9 @@ type RestoreDBClusterToPointInTimeInput struct { // in the Amazon RDS User Guide. Tags []*Tag `locationNameList:"Tag" type:"list"` - // A value that is set to true to restore the DB cluster to the latest restorable - // backup time, and false otherwise. - // - // Default: false + // A value that indicates whether to restore the DB cluster to the latest restorable + // backup time. By default, the DB cluster is not restored to the latest restorable + // backup time. // // Constraints: Can't be specified if RestoreToTime parameter is provided. 
UseLatestRestorableTime *bool `type:"boolean"` @@ -31930,22 +32235,22 @@ func (s *RestoreDBClusterToPointInTimeOutput) SetDBCluster(v *DBCluster) *Restor type RestoreDBInstanceFromDBSnapshotInput struct { _ struct{} `type:"structure"` - // Indicates that minor version upgrades are applied automatically to the DB - // instance during the maintenance window. + // A value that indicates whether minor version upgrades are applied automatically + // to the DB instance during the maintenance window. AutoMinorVersionUpgrade *bool `type:"boolean"` // The Availability Zone (AZ) where the DB instance will be created. // // Default: A random, system-chosen Availability Zone. // - // Constraint: You can't specify the AvailabilityZone parameter if the MultiAZ - // parameter is set to true. + // Constraint: You can't specify the AvailabilityZone parameter if the DB instance + // is a Multi-AZ deployment. // // Example: us-east-1a AvailabilityZone *string `type:"string"` - // True to copy all tags from the restored DB instance to snapshots of the restored - // DB instance, and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the restored DB instance + // to snapshots of the DB instance. By default, tags are not copied. CopyTagsToSnapshot *bool `type:"boolean"` // The compute and memory capacity of the Amazon RDS DB instance, for example, @@ -32012,9 +32317,10 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` - // Indicates if the DB instance should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. For more information, see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // A value that indicates whether the DB instance has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. 
By default, + // deletion protection is disabled. For more information, see Deleting a DB + // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` // Specify the Active Directory Domain to restore the instance in. @@ -32030,16 +32336,14 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // in the Amazon Aurora User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. // // You can enable IAM database authentication for the following database engines // // * For MySQL 5.6, minor version 5.6.34 or higher // // * For MySQL 5.7, minor version 5.7.16 or higher - // - // Default: false EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The database engine to use for the new instance. @@ -32096,10 +32400,10 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // Valid values: license-included | bring-your-own-license | general-public-license LicenseModel *string `type:"string"` - // Specifies if the DB instance is a Multi-AZ deployment. + // A value that indicates whether the DB instance is a Multi-AZ deployment. // - // Constraint: You can't specify the AvailabilityZone parameter if the MultiAZ - // parameter is set to true. + // Constraint: You can't specify the AvailabilityZone parameter if the DB instance + // is a Multi-AZ deployment. MultiAZ *bool `type:"boolean"` // The name of the option group to be used for the restored DB instance. @@ -32120,11 +32424,12 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // class of the DB instance. 
ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` - // Specifies the accessibility options for the DB instance. A value of true - // specifies an Internet-facing instance with a publicly resolvable DNS name, - // which resolves to a public IP address. A value of false specifies an internal - // instance with a DNS name that resolves to a private IP address. For more - // information, see CreateDBInstance. + // A value that indicates whether the DB instance is publicly accessible. When + // the DB instance is publicly accessible, it is an Internet-facing instance + // with a publicly resolvable DNS name, which resolves to a public IP address. + // When the DB instance is not publicly accessible, it is an internal instance + // with a DNS name that resolves to a private IP address. For more information, + // see CreateDBInstance. PubliclyAccessible *bool `type:"boolean"` // Specifies the storage type to be associated with the DB instance. @@ -32133,7 +32438,7 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // // If you specify io1, you must also include a value for the Iops parameter. // - // Default: io1 if the Iops parameter is specified, otherwise standard + // Default: io1 if the Iops parameter is specified, otherwise gp2 StorageType *string `type:"string"` // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) @@ -32147,7 +32452,7 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // device. TdeCredentialPassword *string `type:"string"` - // A value that specifies that the DB instance class of the DB instance uses + // A value that indicates whether the DB instance class of the DB instance uses // its default processor features. UseDefaultProcessorFeatures *bool `type:"boolean"` @@ -32387,10 +32692,9 @@ type RestoreDBInstanceFromS3Input struct { // growth. 
AllocatedStorage *int64 `type:"integer"` - // True to indicate that minor engine upgrades are applied automatically to - // the DB instance during the maintenance window, and otherwise false. - // - // Default: true + // A value that indicates whether minor engine upgrades are applied automatically + // to the DB instance during the maintenance window. By default, minor engine + // upgrades are not applied automatically. AutoMinorVersionUpgrade *bool `type:"boolean"` // The Availability Zone that the DB instance is created in. For information @@ -32403,8 +32707,8 @@ type RestoreDBInstanceFromS3Input struct { // // Example: us-east-1d // - // Constraint: The AvailabilityZone parameter can't be specified if the MultiAZ - // parameter is set to true. The specified Availability Zone must be in the + // Constraint: The AvailabilityZone parameter can't be specified if the DB instance + // is a Multi-AZ deployment. The specified Availability Zone must be in the // same AWS Region as the current endpoint. AvailabilityZone *string `type:"string"` @@ -32413,10 +32717,8 @@ type RestoreDBInstanceFromS3Input struct { // CreateDBInstance. BackupRetentionPeriod *int64 `type:"integer"` - // True to copy all tags from the restored DB instance to snapshots of the restored - // DB instance, and otherwise false. - // - // Default: false. + // A value that indicates whether to copy all tags from the DB instance to snapshots + // of the DB instance. By default, tags are not copied. CopyTagsToSnapshot *bool `type:"boolean"` // The compute and memory capacity of the DB instance, for example, db.m4.large. @@ -32463,9 +32765,10 @@ type RestoreDBInstanceFromS3Input struct { // A DB subnet group to associate with this DB instance. DBSubnetGroupName *string `type:"string"` - // Indicates if the DB instance should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. 
For more information, see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // A value that indicates whether the DB instance has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. By default, + // deletion protection is disabled. For more information, see Deleting a DB + // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` // The list of logs that the restored DB instance is to export to CloudWatch @@ -32474,13 +32777,12 @@ type RestoreDBInstanceFromS3Input struct { // in the Amazon RDS User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. - // - // Default: false + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. EnableIAMDatabaseAuthentication *bool `type:"boolean"` - // True to enable Performance Insights for the DB instance, and otherwise false. + // A value that indicates whether to enable Performance Insights for the DB + // instance. // // For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) // in the Amazon Relational Database Service User Guide. @@ -32511,7 +32813,7 @@ type RestoreDBInstanceFromS3Input struct { // the KMS encryption key used to encrypt the new DB instance, then you can // use the KMS key alias instead of the ARN for the KM encryption key. // - // If the StorageEncrypted parameter is true, and you do not specify a value + // If the StorageEncrypted parameter is enabled, and you do not specify a value // for the KmsKeyId parameter, then Amazon RDS will use your default encryption // key. 
AWS KMS creates the default encryption key for your AWS account. Your // AWS account has a different default encryption key for each AWS Region. @@ -32559,8 +32861,9 @@ type RestoreDBInstanceFromS3Input struct { // a MonitoringRoleArn value. MonitoringRoleArn *string `type:"string"` - // Specifies whether the DB instance is a Multi-AZ deployment. If MultiAZ is - // set to true, you can't set the AvailabilityZone parameter. + // A value that indicates whether the DB instance is a Multi-AZ deployment. + // If the DB instance is a Multi-AZ deployment, you can't set the AvailabilityZone + // parameter. MultiAZ *bool `type:"boolean"` // The name of the option group to associate with this DB instance. If this @@ -32571,6 +32874,11 @@ type RestoreDBInstanceFromS3Input struct { // The AWS KMS key identifier for encryption of Performance Insights data. The // KMS key ID is the Amazon Resource Name (ARN), the KMS key identifier, or // the KMS key alias for the KMS encryption key. + // + // If you do not specify a value for PerformanceInsightsKMSKeyId, then Amazon + // RDS uses your default encryption key. AWS KMS creates the default encryption + // key for your AWS account. Your AWS account has a different default encryption + // key for each AWS Region. PerformanceInsightsKMSKeyId *string `type:"string"` // The amount of time, in days, to retain Performance Insights data. Valid values @@ -32623,11 +32931,12 @@ type RestoreDBInstanceFromS3Input struct { // class of the DB instance. ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` - // Specifies the accessibility options for the DB instance. A value of true - // specifies an Internet-facing instance with a publicly resolvable DNS name, - // which resolves to a public IP address. A value of false specifies an internal - // instance with a DNS name that resolves to a private IP address. For more - // information, see CreateDBInstance. 
+ // A value that indicates whether the DB instance is publicly accessible. When + // the DB instance is publicly accessible, it is an Internet-facing instance + // with a publicly resolvable DNS name, which resolves to a public IP address. + // When the DB instance is not publicly accessible, it is an internal instance + // with a DNS name that resolves to a private IP address. For more information, + // see CreateDBInstance. PubliclyAccessible *bool `type:"boolean"` // The name of your Amazon S3 bucket that contains your database backup file. @@ -32658,7 +32967,7 @@ type RestoreDBInstanceFromS3Input struct { // SourceEngineVersion is a required field SourceEngineVersion *string `type:"string" required:"true"` - // Specifies whether the new DB instance is encrypted or not. + // A value that indicates whether the new DB instance is encrypted or not. StorageEncrypted *bool `type:"boolean"` // Specifies the storage type to be associated with the DB instance. @@ -32667,7 +32976,7 @@ type RestoreDBInstanceFromS3Input struct { // // If you specify io1, you must also include a value for the Iops parameter. // - // Default: io1 if the Iops parameter is specified; otherwise standard + // Default: io1 if the Iops parameter is specified; otherwise gp2 StorageType *string `type:"string"` // A list of tags to associate with this DB instance. For more information, @@ -32675,7 +32984,7 @@ type RestoreDBInstanceFromS3Input struct { // in the Amazon RDS User Guide. Tags []*Tag `locationNameList:"Tag" type:"list"` - // A value that specifies that the DB instance class of the DB instance uses + // A value that indicates whether the DB instance class of the DB instance uses // its default processor features. 
UseDefaultProcessorFeatures *bool `type:"boolean"` @@ -33010,22 +33319,22 @@ func (s *RestoreDBInstanceFromS3Output) SetDBInstance(v *DBInstance) *RestoreDBI type RestoreDBInstanceToPointInTimeInput struct { _ struct{} `type:"structure"` - // Indicates that minor version upgrades are applied automatically to the DB - // instance during the maintenance window. + // A value that indicates whether minor version upgrades are applied automatically + // to the DB instance during the maintenance window. AutoMinorVersionUpgrade *bool `type:"boolean"` // The Availability Zone (AZ) where the DB instance will be created. // // Default: A random, system-chosen Availability Zone. // - // Constraint: You can't specify the AvailabilityZone parameter if the MultiAZ - // parameter is set to true. + // Constraint: You can't specify the AvailabilityZone parameter if the DB instance + // is a Multi-AZ deployment. // // Example: us-east-1a AvailabilityZone *string `type:"string"` - // True to copy all tags from the restored DB instance to snapshots of the restored - // DB instance, and otherwise false. The default is false. + // A value that indicates whether to copy all tags from the restored DB instance + // to snapshots of the DB instance. By default, tags are not copied. CopyTagsToSnapshot *bool `type:"boolean"` // The compute and memory capacity of the Amazon RDS DB instance, for example, @@ -33064,9 +33373,10 @@ type RestoreDBInstanceToPointInTimeInput struct { // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` - // Indicates if the DB instance should have deletion protection enabled. The - // database can't be deleted when this value is set to true. The default is - // false. For more information, see Deleting a DB Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // A value that indicates whether the DB instance has deletion protection enabled. + // The database can't be deleted when deletion protection is enabled. 
By default, + // deletion protection is disabled. For more information, see Deleting a DB + // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). DeletionProtection *bool `type:"boolean"` // Specify the Active Directory Domain to restore the instance in. @@ -33082,16 +33392,14 @@ type RestoreDBInstanceToPointInTimeInput struct { // in the Amazon RDS User Guide. EnableCloudwatchLogsExports []*string `type:"list"` - // True to enable mapping of AWS Identity and Access Management (IAM) accounts - // to database accounts, and otherwise false. + // A value that indicates whether to enable mapping of AWS Identity and Access + // Management (IAM) accounts to database accounts. By default, mapping is disabled. // // You can enable IAM database authentication for the following database engines // // * For MySQL 5.6, minor version 5.6.34 or higher // // * For MySQL 5.7, minor version 5.7.16 or higher - // - // Default: false EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The database engine to use for the new instance. @@ -33142,10 +33450,10 @@ type RestoreDBInstanceToPointInTimeInput struct { // Valid values: license-included | bring-your-own-license | general-public-license LicenseModel *string `type:"string"` - // Specifies if the DB instance is a Multi-AZ deployment. + // A value that indicates whether the DB instance is a Multi-AZ deployment. // - // Constraint: You can't specify the AvailabilityZone parameter if the MultiAZ - // parameter is set to true. + // Constraint: You can't specify the AvailabilityZone parameter if the DB instance + // is a Multi-AZ deployment. MultiAZ *bool `type:"boolean"` // The name of the option group to be used for the restored DB instance. @@ -33166,11 +33474,12 @@ type RestoreDBInstanceToPointInTimeInput struct { // class of the DB instance. 
ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` - // Specifies the accessibility options for the DB instance. A value of true - // specifies an Internet-facing instance with a publicly resolvable DNS name, - // which resolves to a public IP address. A value of false specifies an internal - // instance with a DNS name that resolves to a private IP address. For more - // information, see CreateDBInstance. + // A value that indicates whether the DB instance is publicly accessible. When + // the DB instance is publicly accessible, it is an Internet-facing instance + // with a publicly resolvable DNS name, which resolves to a public IP address. + // When the DB instance is not publicly accessible, it is an internal instance + // with a DNS name that resolves to a private IP address. For more information, + // see CreateDBInstance. PubliclyAccessible *bool `type:"boolean"` // The date and time to restore from. @@ -33181,7 +33490,7 @@ type RestoreDBInstanceToPointInTimeInput struct { // // * Must be before the latest restorable time for the DB instance // - // * Can't be specified if UseLatestRestorableTime parameter is true + // * Can't be specified if the UseLatestRestorableTime parameter is enabled // // Example: 2009-09-07T23:45:00Z RestoreTime *time.Time `type:"timestamp"` @@ -33202,7 +33511,7 @@ type RestoreDBInstanceToPointInTimeInput struct { // // If you specify io1, you must also include a value for the Iops parameter. // - // Default: io1 if the Iops parameter is specified, otherwise standard + // Default: io1 if the Iops parameter is specified, otherwise gp2 StorageType *string `type:"string"` // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) @@ -33229,16 +33538,15 @@ type RestoreDBInstanceToPointInTimeInput struct { // device. 
TdeCredentialPassword *string `type:"string"` - // A value that specifies that the DB instance class of the DB instance uses + // A value that indicates whether the DB instance class of the DB instance uses // its default processor features. UseDefaultProcessorFeatures *bool `type:"boolean"` - // Specifies whether (true) or not (false) the DB instance is restored from - // the latest backup time. - // - // Default: false + // A value that indicates whether the DB instance is restored from the latest + // backup time. By default, the DB instance is not restored from the latest + // backup time. // - // Constraints: Can't be specified if RestoreTime parameter is provided. + // Constraints: Can't be specified if the RestoreTime parameter is provided. UseLatestRestorableTime *bool `type:"boolean"` // A list of EC2 VPC security groups to associate with this DB instance. @@ -33631,7 +33939,7 @@ func (s *RevokeDBSecurityGroupIngressOutput) SetDBSecurityGroup(v *DBSecurityGro type ScalingConfiguration struct { _ struct{} `type:"structure"` - // A value that specifies whether to allow or disallow automatic pause for an + // A value that indicates whether to allow or disallow automatic pause for an // Aurora DB cluster in serverless DB engine mode. A DB cluster can be paused // only when it's idle (it has no connections). // @@ -33642,14 +33950,14 @@ type ScalingConfiguration struct { // The maximum capacity for an Aurora DB cluster in serverless DB engine mode. // - // Valid capacity values are 2, 4, 8, 16, 32, 64, 128, and 256. + // Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256. // // The maximum capacity must be greater than or equal to the minimum capacity. MaxCapacity *int64 `type:"integer"` // The minimum capacity for an Aurora DB cluster in serverless DB engine mode. // - // Valid capacity values are 2, 4, 8, 16, 32, 64, 128, and 256. + // Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256. 
// // The minimum capacity must be less than or equal to the maximum capacity. MinCapacity *int64 `type:"integer"` @@ -33660,11 +33968,14 @@ type ScalingConfiguration struct { // The action to take when the timeout is reached, either ForceApplyCapacityChange // or RollbackCapacityChange. // - // ForceApplyCapacityChange, the default, sets the capacity to the specified - // value as soon as possible. + // ForceApplyCapacityChange sets the capacity to the specified value as soon + // as possible. // - // RollbackCapacityChange ignores the capacity change if a scaling point is - // not found in the timeout period. + // RollbackCapacityChange, the default, ignores the capacity change if a scaling + // point is not found in the timeout period. + // + // If you specify ForceApplyCapacityChange, connections that prevent Aurora + // Serverless from finding a scaling point might be dropped. // // For more information, see Autoscaling for Aurora Serverless (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.auto-scaling) // in the Amazon Aurora User Guide. @@ -33825,6 +34136,147 @@ func (s *SourceRegion) SetStatus(v string) *SourceRegion { return s } +type StartActivityStreamInput struct { + _ struct{} `type:"structure"` + + // Specifies whether or not the database activity stream is to start as soon + // as possible, regardless of the maintenance window for the database. + ApplyImmediately *bool `type:"boolean"` + + // The AWS KMS key identifier for encrypting messages in the database activity + // stream. The key identifier can be either a key ID, a key ARN, or a key alias. + // + // KmsKeyId is a required field + KmsKeyId *string `type:"string" required:"true"` + + // Specifies the mode of the database activity stream. Database events such + // as a change or access generate an activity stream event. The database session + // can handle these events either synchronously or asynchronously. 
+ // + // Mode is a required field + Mode *string `type:"string" required:"true" enum:"ActivityStreamMode"` + + // The Amazon Resource Name (ARN) of the DB cluster, for example arn:aws:rds:us-east-1:12345667890:cluster:das-cluster. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartActivityStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartActivityStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartActivityStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartActivityStreamInput"} + if s.KmsKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KmsKeyId")) + } + if s.Mode == nil { + invalidParams.Add(request.NewErrParamRequired("Mode")) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyImmediately sets the ApplyImmediately field's value. +func (s *StartActivityStreamInput) SetApplyImmediately(v bool) *StartActivityStreamInput { + s.ApplyImmediately = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *StartActivityStreamInput) SetKmsKeyId(v string) *StartActivityStreamInput { + s.KmsKeyId = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *StartActivityStreamInput) SetMode(v string) *StartActivityStreamInput { + s.Mode = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. 
+func (s *StartActivityStreamInput) SetResourceArn(v string) *StartActivityStreamInput { + s.ResourceArn = &v + return s +} + +type StartActivityStreamOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether or not the database activity stream will start as soon + // as possible, regardless of the maintenance window for the database. + ApplyImmediately *bool `type:"boolean"` + + // The name of the Amazon Kinesis data stream to be used for the database activity + // stream. + KinesisStreamName *string `type:"string"` + + // The AWS KMS key identifier for encryption of messages in the database activity + // stream. + KmsKeyId *string `type:"string"` + + // The mode of the database activity stream. + Mode *string `type:"string" enum:"ActivityStreamMode"` + + // The status of the database activity stream. + Status *string `type:"string" enum:"ActivityStreamStatus"` +} + +// String returns the string representation +func (s StartActivityStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartActivityStreamOutput) GoString() string { + return s.String() +} + +// SetApplyImmediately sets the ApplyImmediately field's value. +func (s *StartActivityStreamOutput) SetApplyImmediately(v bool) *StartActivityStreamOutput { + s.ApplyImmediately = &v + return s +} + +// SetKinesisStreamName sets the KinesisStreamName field's value. +func (s *StartActivityStreamOutput) SetKinesisStreamName(v string) *StartActivityStreamOutput { + s.KinesisStreamName = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *StartActivityStreamOutput) SetKmsKeyId(v string) *StartActivityStreamOutput { + s.KmsKeyId = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *StartActivityStreamOutput) SetMode(v string) *StartActivityStreamOutput { + s.Mode = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *StartActivityStreamOutput) SetStatus(v string) *StartActivityStreamOutput { + s.Status = &v + return s +} + type StartDBClusterInput struct { _ struct{} `type:"structure"` @@ -33953,6 +34405,98 @@ func (s *StartDBInstanceOutput) SetDBInstance(v *DBInstance) *StartDBInstanceOut return s } +type StopActivityStreamInput struct { + _ struct{} `type:"structure"` + + // Specifies whether or not the database activity stream is to stop as soon + // as possible, regardless of the maintenance window for the database. + ApplyImmediately *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the DB cluster for the database activity + // stream. For example, arn:aws:rds:us-east-1:12345667890:cluster:das-cluster. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StopActivityStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopActivityStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopActivityStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopActivityStreamInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyImmediately sets the ApplyImmediately field's value. +func (s *StopActivityStreamInput) SetApplyImmediately(v bool) *StopActivityStreamInput { + s.ApplyImmediately = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. 
+func (s *StopActivityStreamInput) SetResourceArn(v string) *StopActivityStreamInput { + s.ResourceArn = &v + return s +} + +type StopActivityStreamOutput struct { + _ struct{} `type:"structure"` + + // The name of the Amazon Kinesis data stream used for the database activity + // stream. + KinesisStreamName *string `type:"string"` + + // The AWS KMS key identifier used for encrypting messages in the database activity + // stream. + KmsKeyId *string `type:"string"` + + // The status of the database activity stream. + Status *string `type:"string" enum:"ActivityStreamStatus"` +} + +// String returns the string representation +func (s StopActivityStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopActivityStreamOutput) GoString() string { + return s.String() +} + +// SetKinesisStreamName sets the KinesisStreamName field's value. +func (s *StopActivityStreamOutput) SetKinesisStreamName(v string) *StopActivityStreamOutput { + s.KinesisStreamName = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *StopActivityStreamOutput) SetKmsKeyId(v string) *StopActivityStreamOutput { + s.KmsKeyId = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *StopActivityStreamOutput) SetStatus(v string) *StopActivityStreamOutput { + s.Status = &v + return s +} + type StopDBClusterInput struct { _ struct{} `type:"structure"` @@ -34387,6 +34931,28 @@ func (s *VpcSecurityGroupMembership) SetVpcSecurityGroupId(v string) *VpcSecurit return s } +const ( + // ActivityStreamModeSync is a ActivityStreamMode enum value + ActivityStreamModeSync = "sync" + + // ActivityStreamModeAsync is a ActivityStreamMode enum value + ActivityStreamModeAsync = "async" +) + +const ( + // ActivityStreamStatusStopped is a ActivityStreamStatus enum value + ActivityStreamStatusStopped = "stopped" + + // ActivityStreamStatusStarting is a ActivityStreamStatus enum value + ActivityStreamStatusStarting = "starting" + + // ActivityStreamStatusStarted is a ActivityStreamStatus enum value + ActivityStreamStatusStarted = "started" + + // ActivityStreamStatusStopping is a ActivityStreamStatus enum value + ActivityStreamStatusStopping = "stopping" +) + const ( // ApplyMethodImmediate is a ApplyMethod enum value ApplyMethodImmediate = "immediate" diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/rds/customizations.go index d412fb282ba..cee03588640 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/customizations.go @@ -48,6 +48,12 @@ func copyDBSnapshotPresign(r *request.Request) { } originParams.DestinationRegion = r.Config.Region + + // preSignedUrl is not required for instances in the same region. 
+ if *originParams.SourceRegion == *originParams.DestinationRegion { + return + } + newParams := awsutil.CopyOf(r.Params).(*CopyDBSnapshotInput) originParams.PreSignedUrl = presignURL(r, originParams.SourceRegion, newParams) } @@ -60,6 +66,11 @@ func createDBInstanceReadReplicaPresign(r *request.Request) { } originParams.DestinationRegion = r.Config.Region + // preSignedUrl is not required for instances in the same region. + if *originParams.SourceRegion == *originParams.DestinationRegion { + return + } + newParams := awsutil.CopyOf(r.Params).(*CreateDBInstanceReadReplicaInput) originParams.PreSignedUrl = presignURL(r, originParams.SourceRegion, newParams) } @@ -72,6 +83,11 @@ func copyDBClusterSnapshotPresign(r *request.Request) { } originParams.DestinationRegion = r.Config.Region + // preSignedUrl is not required for instances in the same region. + if *originParams.SourceRegion == *originParams.DestinationRegion { + return + } + newParams := awsutil.CopyOf(r.Params).(*CopyDBClusterSnapshotInput) originParams.PreSignedUrl = presignURL(r, originParams.SourceRegion, newParams) } @@ -84,6 +100,11 @@ func createDBClusterPresign(r *request.Request) { } originParams.DestinationRegion = r.Config.Region + // preSignedUrl is not required for instances in the same region. + if *originParams.SourceRegion == *originParams.DestinationRegion { + return + } + newParams := awsutil.CopyOf(r.Params).(*CreateDBClusterInput) originParams.PreSignedUrl = presignURL(r, originParams.SourceRegion, newParams) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/securityhub/api.go b/vendor/github.com/aws/aws-sdk-go/service/securityhub/api.go index 83ecb2e2c15..07aa6990383 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/securityhub/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/securityhub/api.go @@ -58,7 +58,7 @@ func (c *SecurityHub) AcceptInvitationRequest(input *AcceptInvitationInput) (req // AcceptInvitation API operation for AWS SecurityHub. 
// -// Accepts the invitation to be monitored by a master SecurityHub account. +// Accepts the invitation to be monitored by a Security Hub master account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -72,18 +72,18 @@ func (c *SecurityHub) AcceptInvitationRequest(input *AcceptInvitationInput) (req // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/AcceptInvitation func (c *SecurityHub) AcceptInvitation(input *AcceptInvitationInput) (*AcceptInvitationOutput, error) { @@ -168,11 +168,11 @@ func (c *SecurityHub) BatchDisableStandardsRequest(input *BatchDisableStandardsI // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. 
+// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond @@ -261,11 +261,11 @@ func (c *SecurityHub) BatchEnableStandardsRequest(input *BatchEnableStandardsInp // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond @@ -337,8 +337,8 @@ func (c *SecurityHub) BatchImportFindingsRequest(input *BatchImportFindingsInput // BatchImportFindings API operation for AWS SecurityHub. // -// Imports security findings that are generated by the integrated third-party -// products into Security Hub. +// Imports security findings generated from an integrated third-party product +// into Security Hub. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -352,15 +352,15 @@ func (c *SecurityHub) BatchImportFindingsRequest(input *BatchImportFindingsInput // Internal server error. 
// // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/BatchImportFindings func (c *SecurityHub) BatchImportFindings(input *BatchImportFindingsInput) (*BatchImportFindingsOutput, error) { @@ -443,15 +443,15 @@ func (c *SecurityHub) CreateInsightRequest(input *CreateInsightInput) (req *requ // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeResourceConflictException "ResourceConflictException" // The resource specified in the request conflicts with an existing resource. 
@@ -522,8 +522,9 @@ func (c *SecurityHub) CreateMembersRequest(input *CreateMembersInput) (req *requ // CreateMembers API operation for AWS SecurityHub. // -// Creates member Security Hub accounts in the current AWS account (which becomes -// the master Security Hub account) that has Security Hub enabled. +// Creates Security Hub member accounts associated with the account used for +// this action, which becomes the Security Hub Master account. Security Hub +// must be enabled in the account used to make this request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -537,15 +538,15 @@ func (c *SecurityHub) CreateMembersRequest(input *CreateMembersInput) (req *requ // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeResourceConflictException "ResourceConflictException" // The resource specified in the request conflicts with an existing resource. @@ -616,8 +617,8 @@ func (c *SecurityHub) DeclineInvitationsRequest(input *DeclineInvitationsInput) // DeclineInvitations API operation for AWS SecurityHub. // -// Declines invitations that are sent to this AWS account (invitee) by the AWS -// accounts (inviters) that are specified by the account IDs. 
+// Declines invitations that are sent to this AWS account (invitee) from the +// AWS accounts (inviters) that are specified by the provided AccountIds. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -631,14 +632,14 @@ func (c *SecurityHub) DeclineInvitationsRequest(input *DeclineInvitationsInput) // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeclineInvitations func (c *SecurityHub) DeclineInvitations(input *DeclineInvitationsInput) (*DeclineInvitationsOutput, error) { @@ -706,7 +707,7 @@ func (c *SecurityHub) DeleteInsightRequest(input *DeleteInsightInput) (req *requ // DeleteInsight API operation for AWS SecurityHub. // -// Deletes an insight that is specified by the insight ARN. +// Deletes the insight specified by the InsightArn. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -720,18 +721,18 @@ func (c *SecurityHub) DeleteInsightRequest(input *DeleteInsightInput) (req *requ // Internal server error. 
// // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteInsight func (c *SecurityHub) DeleteInsight(input *DeleteInsightInput) (*DeleteInsightOutput, error) { @@ -799,8 +800,8 @@ func (c *SecurityHub) DeleteInvitationsRequest(input *DeleteInvitationsInput) (r // DeleteInvitations API operation for AWS SecurityHub. // -// Deletes invitations that are sent to this AWS account (invitee) by the AWS -// accounts (inviters) that are specified by their account IDs. +// Deletes invitations that were sent to theis AWS account (invitee) by the +// AWS accounts (inviters) that are specified by their account IDs. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -814,18 +815,18 @@ func (c *SecurityHub) DeleteInvitationsRequest(input *DeleteInvitationsInput) (r // Internal server error. 
// // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteInvitations func (c *SecurityHub) DeleteInvitations(input *DeleteInvitationsInput) (*DeleteInvitationsOutput, error) { @@ -893,8 +894,7 @@ func (c *SecurityHub) DeleteMembersRequest(input *DeleteMembersInput) (req *requ // DeleteMembers API operation for AWS SecurityHub. // -// Deletes the Security Hub member accounts that are specified by the account -// IDs. +// Deletes the Security Hub member accounts that the account IDs specify. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -908,18 +908,18 @@ func (c *SecurityHub) DeleteMembersRequest(input *DeleteMembersInput) (req *requ // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. 
+// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DeleteMembers func (c *SecurityHub) DeleteMembers(input *DeleteMembersInput) (*DeleteMembersOutput, error) { @@ -943,6 +943,152 @@ func (c *SecurityHub) DeleteMembersWithContext(ctx aws.Context, input *DeleteMem return out, req.Send() } +const opDescribeProducts = "DescribeProducts" + +// DescribeProductsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeProducts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeProducts for more information on using the DescribeProducts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeProductsRequest method. 
+// req, resp := client.DescribeProductsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DescribeProducts +func (c *SecurityHub) DescribeProductsRequest(input *DescribeProductsInput) (req *request.Request, output *DescribeProductsOutput) { + op := &request.Operation{ + Name: opDescribeProducts, + HTTPMethod: "GET", + HTTPPath: "/products", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeProductsInput{} + } + + output = &DescribeProductsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeProducts API operation for AWS SecurityHub. +// +// Returns information about the products available that you can subscribe to. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SecurityHub's +// API operation DescribeProducts for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalException "InternalException" +// Internal server error. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error code describes the limit exceeded. +// +// * ErrCodeInvalidAccessException "InvalidAccessException" +// AWS Security Hub isn't enabled for the account used to make this request. +// +// * ErrCodeInvalidInputException "InvalidInputException" +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DescribeProducts +func (c *SecurityHub) DescribeProducts(input *DescribeProductsInput) (*DescribeProductsOutput, error) { + req, out := c.DescribeProductsRequest(input) + return out, req.Send() +} + +// DescribeProductsWithContext is the same as DescribeProducts with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeProducts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecurityHub) DescribeProductsWithContext(ctx aws.Context, input *DescribeProductsInput, opts ...request.Option) (*DescribeProductsOutput, error) { + req, out := c.DescribeProductsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeProductsPages iterates over the pages of a DescribeProducts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeProducts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeProducts operation. 
+// pageNum := 0 +// err := client.DescribeProductsPages(params, +// func(page *securityhub.DescribeProductsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SecurityHub) DescribeProductsPages(input *DescribeProductsInput, fn func(*DescribeProductsOutput, bool) bool) error { + return c.DescribeProductsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeProductsPagesWithContext same as DescribeProductsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecurityHub) DescribeProductsPagesWithContext(ctx aws.Context, input *DescribeProductsInput, fn func(*DescribeProductsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeProductsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeProductsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeProductsOutput), !p.HasNextPage()) + } + return p.Err() +} + const opDisableImportFindingsForProduct = "DisableImportFindingsForProduct" // DisableImportFindingsForProductRequest generates a "aws/request.Request" representing the @@ -1003,14 +1149,14 @@ func (c *SecurityHub) DisableImportFindingsForProductRequest(input *DisableImpor // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. 
+// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond @@ -1083,7 +1229,7 @@ func (c *SecurityHub) DisableSecurityHubRequest(input *DisableSecurityHubInput) // DisableSecurityHub API operation for AWS SecurityHub. // -// Disables the AWS Security Hub Service. +// Disables the Security Hub service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1101,10 +1247,10 @@ func (c *SecurityHub) DisableSecurityHubRequest(input *DisableSecurityHubInput) // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisableSecurityHub func (c *SecurityHub) DisableSecurityHub(input *DisableSecurityHubInput) (*DisableSecurityHubOutput, error) { @@ -1187,18 +1333,18 @@ func (c *SecurityHub) DisassociateFromMasterAccountRequest(input *DisassociateFr // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateFromMasterAccount func (c *SecurityHub) DisassociateFromMasterAccount(input *DisassociateFromMasterAccountInput) (*DisassociateFromMasterAccountOutput, error) { @@ -1282,18 +1428,18 @@ func (c *SecurityHub) DisassociateMembersRequest(input *DisassociateMembersInput // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. 
// // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/DisassociateMembers func (c *SecurityHub) DisassociateMembers(input *DisassociateMembersInput) (*DisassociateMembersOutput, error) { @@ -1376,11 +1522,11 @@ func (c *SecurityHub) EnableImportFindingsForProductRequest(input *EnableImportF // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeResourceConflictException "ResourceConflictException" // The resource specified in the request conflicts with an existing resource. @@ -1456,7 +1602,7 @@ func (c *SecurityHub) EnableSecurityHubRequest(input *EnableSecurityHubInput) (r // EnableSecurityHub API operation for AWS SecurityHub. // -// Enables the AWS Security Hub service. +// Enables the Security Hub service. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1474,13 +1620,13 @@ func (c *SecurityHub) EnableSecurityHubRequest(input *EnableSecurityHubInput) (r // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeResourceConflictException "ResourceConflictException" // The resource specified in the request conflicts with an existing resource. // // * ErrCodeAccessDeniedException "AccessDeniedException" -// You do not have permission to to perform the action specified in the request. +// You don't have permission to perform the action specified in the request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/EnableSecurityHub func (c *SecurityHub) EnableSecurityHub(input *EnableSecurityHubInput) (*EnableSecurityHubOutput, error) { @@ -1562,11 +1708,11 @@ func (c *SecurityHub) GetEnabledStandardsRequest(input *GetEnabledStandardsInput // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond @@ -1644,8 +1790,8 @@ func (c *SecurityHub) GetFindingsRequest(input *GetFindingsInput) (req *request. 
// GetFindings API operation for AWS SecurityHub. // -// Lists and describes Security Hub-aggregated findings that are specified by -// filter attributes. +// Lists and describes Security Hub-aggregated findings that filter attributes +// specify. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1659,11 +1805,11 @@ func (c *SecurityHub) GetFindingsRequest(input *GetFindingsInput) (req *request. // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond @@ -1785,7 +1931,7 @@ func (c *SecurityHub) GetInsightResultsRequest(input *GetInsightResultsInput) (r // GetInsightResults API operation for AWS SecurityHub. // -// Lists the results of the Security Hub insight specified by the insight ARN. +// Lists the results of the Security Hub insight that the insight ARN specifies. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1799,18 +1945,18 @@ func (c *SecurityHub) GetInsightResultsRequest(input *GetInsightResultsInput) (r // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. 
+// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsightResults func (c *SecurityHub) GetInsightResults(input *GetInsightResultsInput) (*GetInsightResultsOutput, error) { @@ -1884,7 +2030,7 @@ func (c *SecurityHub) GetInsightsRequest(input *GetInsightsInput) (req *request. // GetInsights API operation for AWS SecurityHub. // -// Lists and describes insights that are specified by insight ARNs. +// Lists and describes insights that insight ARNs specify. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1898,18 +2044,18 @@ func (c *SecurityHub) GetInsightsRequest(input *GetInsightsInput) (req *request. // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. 
+// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetInsights func (c *SecurityHub) GetInsights(input *GetInsightsInput) (*GetInsightsOutput, error) { @@ -2042,11 +2188,11 @@ func (c *SecurityHub) GetInvitationsCountRequest(input *GetInvitationsCountInput // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond @@ -2133,18 +2279,18 @@ func (c *SecurityHub) GetMasterAccountRequest(input *GetMasterAccountInput) (req // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. 
// // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetMasterAccount func (c *SecurityHub) GetMasterAccount(input *GetMasterAccountInput) (*GetMasterAccountOutput, error) { @@ -2212,8 +2358,8 @@ func (c *SecurityHub) GetMembersRequest(input *GetMembersInput) (req *request.Re // GetMembers API operation for AWS SecurityHub. // -// Returns the details on the Security Hub member accounts that are specified -// by the account IDs. +// Returns the details on the Security Hub member accounts that the account +// IDs specify. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2227,18 +2373,18 @@ func (c *SecurityHub) GetMembersRequest(input *GetMembersInput) (req *request.Re // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. 
+// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/GetMembers func (c *SecurityHub) GetMembers(input *GetMembersInput) (*GetMembersOutput, error) { @@ -2323,18 +2469,18 @@ func (c *SecurityHub) InviteMembersRequest(input *InviteMembersInput) (req *requ // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/InviteMembers func (c *SecurityHub) InviteMembers(input *InviteMembersInput) (*InviteMembersOutput, error) { @@ -2408,7 +2554,7 @@ func (c *SecurityHub) ListEnabledProductsForImportRequest(input *ListEnabledProd // ListEnabledProductsForImport API operation for AWS SecurityHub. // -// Lists all findings-generating solutions (products) whose findings you've +// Lists all findings-generating solutions (products) whose findings you have // subscribed to receive in Security Hub. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2427,7 +2573,7 @@ func (c *SecurityHub) ListEnabledProductsForImportRequest(input *ListEnabledProd // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListEnabledProductsForImport func (c *SecurityHub) ListEnabledProductsForImport(input *ListEnabledProductsForImportInput) (*ListEnabledProductsForImportOutput, error) { @@ -2560,11 +2706,11 @@ func (c *SecurityHub) ListInvitationsRequest(input *ListInvitationsInput) (req * // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. 
// // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond @@ -2651,11 +2797,11 @@ func (c *SecurityHub) ListMembersRequest(input *ListMembersInput) (req *request. // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond @@ -2683,6 +2829,151 @@ func (c *SecurityHub) ListMembersWithContext(ctx aws.Context, input *ListMembers return out, req.Send() } +const opListProductSubscribers = "ListProductSubscribers" + +// ListProductSubscribersRequest generates a "aws/request.Request" representing the +// client's request for the ListProductSubscribers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListProductSubscribers for more information on using the ListProductSubscribers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListProductSubscribersRequest method. 
+// req, resp := client.ListProductSubscribersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListProductSubscribers +func (c *SecurityHub) ListProductSubscribersRequest(input *ListProductSubscribersInput) (req *request.Request, output *ListProductSubscribersOutput) { + op := &request.Operation{ + Name: opListProductSubscribers, + HTTPMethod: "GET", + HTTPPath: "/productSubscribers/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListProductSubscribersInput{} + } + + output = &ListProductSubscribersOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListProductSubscribers API operation for AWS SecurityHub. +// +// Returns a list of account IDs that are subscribed to the product. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SecurityHub's +// API operation ListProductSubscribers for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The request was rejected because we can't find the specified resource. +// +// * ErrCodeInternalException "InternalException" +// Internal server error. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error code describes the limit exceeded. +// +// * ErrCodeInvalidAccessException "InvalidAccessException" +// AWS Security Hub isn't enabled for the account used to make this request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/ListProductSubscribers +func (c *SecurityHub) ListProductSubscribers(input *ListProductSubscribersInput) (*ListProductSubscribersOutput, error) { + req, out := c.ListProductSubscribersRequest(input) + return out, req.Send() +} + +// ListProductSubscribersWithContext is the same as ListProductSubscribers with the addition of +// the ability to pass a context and additional request options. +// +// See ListProductSubscribers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecurityHub) ListProductSubscribersWithContext(ctx aws.Context, input *ListProductSubscribersInput, opts ...request.Option) (*ListProductSubscribersOutput, error) { + req, out := c.ListProductSubscribersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListProductSubscribersPages iterates over the pages of a ListProductSubscribers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListProductSubscribers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListProductSubscribers operation. 
+// pageNum := 0 +// err := client.ListProductSubscribersPages(params, +// func(page *securityhub.ListProductSubscribersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SecurityHub) ListProductSubscribersPages(input *ListProductSubscribersInput, fn func(*ListProductSubscribersOutput, bool) bool) error { + return c.ListProductSubscribersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListProductSubscribersPagesWithContext same as ListProductSubscribersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecurityHub) ListProductSubscribersPagesWithContext(ctx aws.Context, input *ListProductSubscribersInput, fn func(*ListProductSubscribersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListProductSubscribersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListProductSubscribersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListProductSubscribersOutput), !p.HasNextPage()) + } + return p.Err() +} + const opUpdateFindings = "UpdateFindings" // UpdateFindingsRequest generates a "aws/request.Request" representing the @@ -2728,8 +3019,9 @@ func (c *SecurityHub) UpdateFindingsRequest(input *UpdateFindingsInput) (req *re // UpdateFindings API operation for AWS SecurityHub. // -// Updates the AWS Security Hub-aggregated findings specified by the filter -// attributes. 
+// Updates the Note and RecordState of the Security Hub-aggregated findings +// that the filter attributes specify. Any member account that can view the +// finding also sees the update to the finding. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2743,18 +3035,18 @@ func (c *SecurityHub) UpdateFindingsRequest(input *UpdateFindingsInput) (req *re // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UpdateFindings func (c *SecurityHub) UpdateFindings(input *UpdateFindingsInput) (*UpdateFindingsOutput, error) { @@ -2823,7 +3115,7 @@ func (c *SecurityHub) UpdateInsightRequest(input *UpdateInsightInput) (req *requ // UpdateInsight API operation for AWS SecurityHub. // -// Updates the AWS Security Hub insight specified by the insight ARN. +// Updates the Security Hub insight that the insight ARN specifies. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2837,18 +3129,18 @@ func (c *SecurityHub) UpdateInsightRequest(input *UpdateInsightInput) (req *requ // Internal server error. // // * ErrCodeInvalidInputException "InvalidInputException" -// The request was rejected because an invalid or out-of-range value was supplied -// for an input parameter. +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. // // * ErrCodeInvalidAccessException "InvalidAccessException" -// AWS Security Hub is not enabled for the account used to make this request. +// AWS Security Hub isn't enabled for the account used to make this request. // // * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" -// The request was rejected because the specified resource cannot be found. +// The request was rejected because we can't find the specified resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UpdateInsight func (c *SecurityHub) UpdateInsight(input *UpdateInsightInput) (*UpdateInsightOutput, error) { @@ -2875,11 +3167,11 @@ func (c *SecurityHub) UpdateInsightWithContext(ctx aws.Context, input *UpdateIns type AcceptInvitationInput struct { _ struct{} `type:"structure"` - // The ID of the invitation that is sent to the AWS account by the Security - // Hub master account. + // The ID of the invitation that the Security Hub master account sends to the + // AWS account. InvitationId *string `type:"string"` - // The account ID of the master Security Hub account whose invitation you're + // The account ID of the Security Hub master account whose invitation you're // accepting. 
MasterId *string `type:"string"` } @@ -2953,7 +3245,7 @@ func (s *AccountDetails) SetEmail(v string) *AccountDetails { return s } -// The details of an AWS EC2 instance. +// The details of an Amazon EC2 instance. type AwsEc2InstanceDetails struct { _ struct{} `type:"structure"` @@ -2975,13 +3267,13 @@ type AwsEc2InstanceDetails struct { // The date/time the instance was launched. LaunchedAt *string `type:"string"` - // The identifier of the subnet in which the instance was launched. + // The identifier of the subnet that the instance was launched in. SubnetId *string `type:"string"` // The instance type of the instance. Type *string `type:"string"` - // The identifier of the VPC in which the instance was launched. + // The identifier of the VPC that the instance was launched in. VpcId *string `type:"string"` } @@ -3049,7 +3341,7 @@ func (s *AwsEc2InstanceDetails) SetVpcId(v string) *AwsEc2InstanceDetails { return s } -// AWS IAM access key details related to a finding. +// IAM access key details related to a finding. type AwsIamAccessKeyDetails struct { _ struct{} `type:"structure"` @@ -3091,7 +3383,7 @@ func (s *AwsIamAccessKeyDetails) SetUserName(v string) *AwsIamAccessKeyDetails { return s } -// The details of an AWS S3 Bucket. +// The details of an Amazon S3 bucket. type AwsS3BucketDetails struct { _ struct{} `type:"structure"` @@ -3129,29 +3421,29 @@ func (s *AwsS3BucketDetails) SetOwnerName(v string) *AwsS3BucketDetails { // AWS security services and third-party solutions, and compliance checks. // // A finding is a potential security issue generated either by AWS services -// (GuardDuty, Inspector, Macie) or by the integrated third-party solutions -// and compliance checks. +// (Amazon GuardDuty, Amazon Inspector, and Amazon Macie) or by the integrated +// third-party solutions and compliance checks. type AwsSecurityFinding struct { _ struct{} `type:"structure"` - // The AWS account ID in which a finding is generated. 
+ // The AWS account ID that a finding is generated in. // // AwsAccountId is a required field AwsAccountId *string `type:"string" required:"true"` // This data type is exclusive to findings that are generated as the result // of a check run against a specific rule in a supported standard (for example, - // AWS CIS Foundations). Contains compliance-related finding details. + // CIS AWS Foundations). Contains compliance-related finding details. Compliance *Compliance `type:"structure"` // A finding's confidence. Confidence is defined as the likelihood that a finding // accurately identifies the behavior or issue that it was intended to identify. - // Confidence is scored on a 0-100 basis using a ratio scale. 0 equates zero - // percent confidence and 100 equates to 100 percent confidence. + // Confidence is scored on a 0-100 basis using a ratio scale, where 0 means + // zero percent confidence and 100 means 100 percent confidence. Confidence *int64 `type:"integer"` - // An ISO8601-formatted timestamp that indicates when the potential security - // issue captured by a finding was created by the security findings provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider created the potential security issue that a finding captured. // // CreatedAt is a required field CreatedAt *string `type:"string" required:"true"` @@ -3166,14 +3458,13 @@ type AwsSecurityFinding struct { // In this release, Description is a required property. Description *string `type:"string"` - // An ISO8601-formatted timestamp that indicates when the potential security - // issue captured by a finding was first observed by the security findings provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider first observed the potential security issue that a finding captured. 
FirstObservedAt *string `type:"string"` - // This is the identifier for the solution-specific component (a discrete unit - // of logic) that generated a finding. In various security findings provider's - // solutions, this generator can be called a rule, a check, a detector, a plug-in, - // etc. + // The identifier for the solution-specific component (a discrete unit of logic) + // that generated a finding. In various security-findings providers' solutions, + // this generator can be called a rule, a check, a detector, a plug-in, etc. // // GeneratorId is a required field GeneratorId *string `type:"string" required:"true"` @@ -3183,9 +3474,9 @@ type AwsSecurityFinding struct { // Id is a required field Id *string `type:"string" required:"true"` - // An ISO8601-formatted timestamp that indicates when the potential security - // issue captured by a finding was most recently observed by the security findings - // provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider most recently observed the potential security issue that a finding + // captured. LastObservedAt *string `type:"string"` // A list of malware related to a finding. @@ -3201,14 +3492,14 @@ type AwsSecurityFinding struct { Process *ProcessDetails `type:"structure"` // The ARN generated by Security Hub that uniquely identifies a third-party - // company (security findings provider) once this provider's product (solution + // company (security-findings provider) after this provider's product (solution // that generates findings) is registered with Security Hub. // // ProductArn is a required field ProductArn *string `type:"string" required:"true"` - // A data type where security findings providers can include additional solution-specific - // details that are not part of the defined AwsSecurityFinding format. 
+ // A data type where security-findings providers can include additional solution-specific + // details that aren't part of the defined AwsSecurityFinding format. ProductFields map[string]*string `type:"map"` // The record state of a finding. @@ -3220,13 +3511,13 @@ type AwsSecurityFinding struct { // An data type that describes the remediation options for a finding. Remediation *Remediation `type:"structure"` - // A set of resource data types that describe the resources to which the finding - // refers. + // A set of resource data types that describe the resources that the finding + // refers to. // // Resources is a required field Resources []*Resource `type:"list" required:"true"` - // The schema version for which a finding is formatted. + // The schema version that a finding is formatted for. // // SchemaVersion is a required field SchemaVersion *string `type:"string" required:"true"` @@ -3236,7 +3527,7 @@ type AwsSecurityFinding struct { // Severity is a required field Severity *Severity `type:"structure" required:"true"` - // A URL that links to a page about the current finding in the security findings + // A URL that links to a page about the current finding in the security-findings // provider's solution. SourceUrl *string `type:"string"` @@ -3248,7 +3539,7 @@ type AwsSecurityFinding struct { // In this release, Title is a required property. Title *string `type:"string"` - // One or more finding types in the format of 'namespace/category/classifier' + // One or more finding types in the format of namespace/category/classifier // that classify a finding. // // Valid namespace values are: Software and Configuration Checks | TTPs | Effects @@ -3257,8 +3548,8 @@ type AwsSecurityFinding struct { // Types is a required field Types []*string `type:"list" required:"true"` - // An ISO8601-formatted timestamp that indicates when the finding record was - // last updated by the security findings provider. 
+ // An ISO8601-formatted timestamp that indicates when the security-findings + // provider last updated the finding record. // // UpdatedAt is a required field UpdatedAt *string `type:"string" required:"true"` @@ -3550,7 +3841,7 @@ func (s *AwsSecurityFinding) SetWorkflowState(v string) *AwsSecurityFinding { type AwsSecurityFindingFilters struct { _ struct{} `type:"structure"` - // The AWS account ID in which a finding is generated. + // The AWS account ID that a finding is generated in. AwsAccountId []*StringFilter `type:"list"` // The name of the findings provider (company) that owns the solution (product) @@ -3558,18 +3849,18 @@ type AwsSecurityFindingFilters struct { CompanyName []*StringFilter `type:"list"` // Exclusive to findings that are generated as the result of a check run against - // a specific rule in a supported standard (for example, AWS CIS Foundations). + // a specific rule in a supported standard (for example, CIS AWS Foundations). // Contains compliance-related finding details. ComplianceStatus []*StringFilter `type:"list"` // A finding's confidence. Confidence is defined as the likelihood that a finding // accurately identifies the behavior or issue that it was intended to identify. - // Confidence is scored on a 0-100 basis using a ratio scale. 0 equates zero - // percent confidence and 100 equates to 100 percent confidence. + // Confidence is scored on a 0-100 basis using a ratio scale, where 0 means + // zero percent confidence and 100 means 100 percent confidence. Confidence []*NumberFilter `type:"list"` - // An ISO8601-formatted timestamp that indicates when the potential security - // issue captured by a finding was created by the security findings provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider captured the potential security issue that a finding captured. CreatedAt []*DateFilter `type:"list"` // The level of importance assigned to the resources associated with the finding. 
@@ -3580,14 +3871,13 @@ type AwsSecurityFindingFilters struct { // A finding's description. Description []*StringFilter `type:"list"` - // An ISO8601-formatted timestamp that indicates when the potential security - // issue captured by a finding was first observed by the security findings provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider first observed the potential security issue that a finding captured. FirstObservedAt []*DateFilter `type:"list"` - // This is the identifier for the solution-specific component (a discrete unit - // of logic) that generated a finding. In various security findings provider's - // solutions, this generator can be called a rule, a check, a detector, a plug-in, - // etc. + // The identifier for the solution-specific component (a discrete unit of logic) + // that generated a finding. In various security-findings providers' solutions, + // this generator can be called a rule, a check, a detector, a plug-in, etc. GeneratorId []*StringFilter `type:"list"` // The security findings provider-specific identifier for a finding. @@ -3596,9 +3886,9 @@ type AwsSecurityFindingFilters struct { // A keyword for a finding. Keyword []*KeywordFilter `type:"list"` - // An ISO8601-formatted timestamp that indicates when the potential security - // issue captured by a finding was most recently observed by the security findings - // provider. + // An ISO8601-formatted timestamp that indicates when the security-findings + // provider most recently observed the potential security issue that a finding + // captured. LastObservedAt []*DateFilter `type:"list"` // The name of the malware that was observed. 
@@ -3675,12 +3965,12 @@ type AwsSecurityFindingFilters struct { ProcessTerminatedAt []*DateFilter `type:"list"` // The ARN generated by Security Hub that uniquely identifies a third-party - // company (security findings provider) once this provider's product (solution + // company (security findings provider) after this provider's product (solution // that generates findings) is registered with Security Hub. ProductArn []*StringFilter `type:"list"` - // A data type where security findings providers can include additional solution-specific - // details that are not part of the defined AwsSecurityFinding format. + // A data type where security-findings providers can include additional solution-specific + // details that aren't part of the defined AwsSecurityFinding format. ProductFields []*MapFilter `type:"list"` // The name of the solution (product) that generates findings. @@ -3716,13 +4006,13 @@ type AwsSecurityFindingFilters struct { // The date/time the instance was launched. ResourceAwsEc2InstanceLaunchedAt []*DateFilter `type:"list"` - // The identifier of the subnet in which the instance was launched. + // The identifier of the subnet that the instance was launched in. ResourceAwsEc2InstanceSubnetId []*StringFilter `type:"list"` // The instance type of the instance. ResourceAwsEc2InstanceType []*StringFilter `type:"list"` - // The identifier of the VPC in which the instance was launched. + // The identifier of the VPC that the instance was launched in. ResourceAwsEc2InstanceVpcId []*StringFilter `type:"list"` // The creation date/time of the IAM access key related to a finding. @@ -3752,24 +4042,24 @@ type AwsSecurityFindingFilters struct { // The name of the container related to a finding. ResourceContainerName []*StringFilter `type:"list"` - // The details of a resource that does not have a specific sub-field for the - // resource type defined. + // The details of a resource that doesn't have a specific subfield for the resource + // type defined. 
ResourceDetailsOther []*MapFilter `type:"list"` // The canonical identifier for the given resource type. ResourceId []*StringFilter `type:"list"` - // The canonical AWS partition name to which the region is assigned. + // The canonical AWS partition name that the Region is assigned to. ResourcePartition []*StringFilter `type:"list"` - // The canonical AWS external region name where this resource is located. + // The canonical AWS external Region name where this resource is located. ResourceRegion []*StringFilter `type:"list"` // A list of AWS tags associated with a resource at the time the finding was // processed. ResourceTags []*MapFilter `type:"list"` - // Specifies the type of the resource for which details are provided. + // Specifies the type of the resource that details are provided for. ResourceType []*StringFilter `type:"list"` // The label of a finding's severity. @@ -3778,11 +4068,11 @@ type AwsSecurityFindingFilters struct { // The normalized severity of a finding. SeverityNormalized []*NumberFilter `type:"list"` - // The native severity as defined by the security findings provider's solution + // The native severity as defined by the security-findings provider's solution // that generated the finding. SeverityProduct []*NumberFilter `type:"list"` - // A URL that links to a page about the current finding in the security findings + // A URL that links to a page about the current finding in the security-findings // provider's solution. SourceUrl []*StringFilter `type:"list"` @@ -3807,19 +4097,19 @@ type AwsSecurityFindingFilters struct { // A finding's title. Title []*StringFilter `type:"list"` - // A finding type in the format of 'namespace/category/classifier' that classifies + // A finding type in the format of namespace/category/classifier that classifies // a finding. Type []*StringFilter `type:"list"` - // An ISO8601-formatted timestamp that indicates when the finding record was - // last updated by the security findings provider. 
+ // An ISO8601-formatted timestamp that indicates when the security-findings + // provider last updated the finding record. UpdatedAt []*DateFilter `type:"list"` // A list of name/value string pairs associated with the finding. These are // custom, user-defined fields added to a finding. UserDefinedFields []*MapFilter `type:"list"` - // Indicates the veracity of a finding. + // The veracity of a finding. VerificationState []*StringFilter `type:"list"` // The workflow state of a finding. @@ -4337,7 +4627,7 @@ func (s *AwsSecurityFindingFilters) SetWorkflowState(v []*StringFilter) *AwsSecu type BatchDisableStandardsInput struct { _ struct{} `type:"structure"` - // The ARNS of the standards subscriptions that you want to disable. + // The ARNs of the standards subscriptions that you want to disable. // // StandardsSubscriptionArns is a required field StandardsSubscriptionArns []*string `min:"1" type:"list" required:"true"` @@ -4403,7 +4693,7 @@ type BatchEnableStandardsInput struct { // The list of standards that you want to enable. // - // In this release, Security Hub only supports the CIS AWS Foundations standard. + // In this release, Security Hub supports only the CIS AWS Foundations standard. // // Its ARN is arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0. // @@ -4479,7 +4769,7 @@ func (s *BatchEnableStandardsOutput) SetStandardsSubscriptions(v []*StandardsSub type BatchImportFindingsInput struct { _ struct{} `type:"structure"` - // A list of findings that you want to import. Must be submitted in the AWSSecurityFinding + // A list of findings to import. You must submit them in the AwsSecurityFinding // format. // // Findings is a required field @@ -4528,12 +4818,12 @@ func (s *BatchImportFindingsInput) SetFindings(v []*AwsSecurityFinding) *BatchIm type BatchImportFindingsOutput struct { _ struct{} `type:"structure"` - // The number of findings that cannot be imported. + // The number of findings that failed to import. 
// // FailedCount is a required field FailedCount *int64 `type:"integer" required:"true"` - // The list of the findings that cannot be imported. + // The list of the findings that failed to import. FailedFindings []*ImportFindingsError `type:"list"` // The number of findings that were successfully imported @@ -4571,12 +4861,12 @@ func (s *BatchImportFindingsOutput) SetSuccessCount(v int64) *BatchImportFinding } // Exclusive to findings that are generated as the result of a check run against -// a specific rule in a supported standard (for example, AWS CIS Foundations). +// a specific rule in a supported standard (for example, CIS AWS Foundations). // Contains compliance-related finding details. type Compliance struct { _ struct{} `type:"structure"` - // Indicates the result of a compliance check. + // The result of a compliance check. Status *string `type:"string" enum:"ComplianceStatus"` } @@ -4650,9 +4940,9 @@ func (s *ContainerDetails) SetName(v string) *ContainerDetails { type CreateInsightInput struct { _ struct{} `type:"structure"` - // A collection of attributes that are applied to all active Security Hub-aggregated - // findings and that result in a subset of findings that are included in this - // insight. + // A collection of attributes that are applied to all of the active findings + // aggregated by Security Hub, and that result in a subset of findings that + // are included in this insight. // // Filters is a required field Filters *AwsSecurityFindingFilters `type:"structure" required:"true"` @@ -4664,7 +4954,7 @@ type CreateInsightInput struct { // GroupByAttribute is a required field GroupByAttribute *string `type:"string" required:"true"` - // The user-defined name that identifies the insight that you want to create. + // The user-defined name that identifies the insight to create. 
// // Name is a required field Name *string `type:"string" required:"true"` @@ -4720,7 +5010,7 @@ func (s *CreateInsightInput) SetName(v string) *CreateInsightInput { type CreateInsightOutput struct { _ struct{} `type:"structure"` - // The ARN Of the created insight. + // The ARN of the insight created. // // InsightArn is a required field InsightArn *string `type:"string" required:"true"` @@ -4745,8 +5035,8 @@ func (s *CreateInsightOutput) SetInsightArn(v string) *CreateInsightOutput { type CreateMembersInput struct { _ struct{} `type:"structure"` - // A list of account ID and email address pairs of the accounts that you want - // to associate with the master Security Hub account. + // A list of account ID and email address pairs of the accounts to associate + // with the Security Hub master account. AccountDetails []*AccountDetails `type:"list"` } @@ -4769,8 +5059,8 @@ func (s *CreateMembersInput) SetAccountDetails(v []*AccountDetails) *CreateMembe type CreateMembersOutput struct { _ struct{} `type:"structure"` - // A list of account ID and email address pairs of the AWS accounts that could - // not be processed. + // A list of account ID and email address pairs of the AWS accounts that couldn't + // be processed. UnprocessedAccounts []*Result `type:"list"` } @@ -4868,8 +5158,8 @@ func (s *DateRange) SetValue(v int64) *DateRange { type DeclineInvitationsInput struct { _ struct{} `type:"structure"` - // A list of account IDs specifying accounts whose invitations to Security Hub - // you want to decline. + // A list of account IDs that specify the accounts from which invitations to + // Security Hub are declined. AccountIds []*string `type:"list"` } @@ -4892,8 +5182,8 @@ func (s *DeclineInvitationsInput) SetAccountIds(v []*string) *DeclineInvitations type DeclineInvitationsOutput struct { _ struct{} `type:"structure"` - // A list of account ID and email address pairs of the AWS accounts that could - // not be processed. 
+ // A list of account ID and email address pairs of the AWS accounts that couldn't + // be processed. UnprocessedAccounts []*Result `type:"list"` } @@ -4916,7 +5206,7 @@ func (s *DeclineInvitationsOutput) SetUnprocessedAccounts(v []*Result) *DeclineI type DeleteInsightInput struct { _ struct{} `type:"structure"` - // The ARN of the insight that you want to delete. + // The ARN of the insight to delete. // // InsightArn is a required field InsightArn *string `location:"uri" locationName:"InsightArn" type:"string" required:"true"` @@ -4982,8 +5272,8 @@ func (s *DeleteInsightOutput) SetInsightArn(v string) *DeleteInsightOutput { type DeleteInvitationsInput struct { _ struct{} `type:"structure"` - // A list of account IDs specifying accounts whose invitations to Security Hub - // you want to delete. + // A list of account IDs that specify accounts whose invitations to Security + // Hub you want to delete. AccountIds []*string `type:"list"` } @@ -5006,8 +5296,8 @@ func (s *DeleteInvitationsInput) SetAccountIds(v []*string) *DeleteInvitationsIn type DeleteInvitationsOutput struct { _ struct{} `type:"structure"` - // A list of account ID and email address pairs of the AWS accounts that could - // not be processed. + // A list of account ID and email address pairs of the AWS accounts that couldn't + // be processed. UnprocessedAccounts []*Result `type:"list"` } @@ -5054,8 +5344,8 @@ func (s *DeleteMembersInput) SetAccountIds(v []*string) *DeleteMembersInput { type DeleteMembersOutput struct { _ struct{} `type:"structure"` - // A list of account ID and email address pairs of the AWS accounts that could - // not be processed. + // A list of account ID and email address pairs of the AWS accounts that couldn't + // be processed. 
UnprocessedAccounts []*Result `type:"list"` } @@ -5075,6 +5365,85 @@ func (s *DeleteMembersOutput) SetUnprocessedAccounts(v []*Result) *DeleteMembers return s } +type DescribeProductsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return. + MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` + + // The token that is required for pagination. + NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeProductsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProductsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeProductsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeProductsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeProductsInput) SetMaxResults(v int64) *DescribeProductsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeProductsInput) SetNextToken(v string) *DescribeProductsInput { + s.NextToken = &v + return s +} + +type DescribeProductsOutput struct { + _ struct{} `type:"structure"` + + // The token that is required for pagination. + NextToken *string `type:"string"` + + // A list of products. 
+ // + // Products is a required field + Products []*Product `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeProductsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProductsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeProductsOutput) SetNextToken(v string) *DescribeProductsOutput { + s.NextToken = &v + return s +} + +// SetProducts sets the Products field's value. +func (s *DescribeProductsOutput) SetProducts(v []*Product) *DescribeProductsOutput { + s.Products = v + return s +} + type DisableImportFindingsForProductInput struct { _ struct{} `type:"structure"` @@ -5318,11 +5687,11 @@ func (s EnableSecurityHubOutput) GoString() string { type GetEnabledStandardsInput struct { _ struct{} `type:"structure"` - // Indicates the maximum number of items that you want in the response. + // The maximum number of items that you want in the response. MaxResults *int64 `min:"1" type:"integer"` - // Paginates results. Set the value of this parameter to NULL on your first - // call to the GetEnabledStandards operation. For subsequent calls to the operation, + // Paginates results. On your first call to the GetEnabledStandards operation, + // set the value of this parameter to NULL. For subsequent calls to the operation, // fill nextToken in the request with the value of nextToken from the previous // response to continue listing data. NextToken *string `type:"string"` @@ -5410,16 +5779,16 @@ func (s *GetEnabledStandardsOutput) SetStandardsSubscriptions(v []*StandardsSubs type GetFindingsInput struct { _ struct{} `type:"structure"` - // A collection of attributes that is use for querying findings. + // A collection of attributes that is used for querying findings. 
Filters *AwsSecurityFindingFilters `type:"structure"` // Indicates the maximum number of items that you want in the response. MaxResults *int64 `min:"1" type:"integer"` - // Paginates results. Set the value of this parameter to NULL on your first - // call to the GetFindings operation. For subsequent calls to the operation, - // fill nextToken in the request with the value of nextToken from the previous - // response to continue listing data. + // Paginates results. On your first call to the GetFindings operation, set the + // value of this parameter to NULL. For subsequent calls to the operation, fill + // nextToken in the request with the value of nextToken from the previous response + // to continue listing data. NextToken *string `type:"string"` // A collection of attributes used for sorting findings. @@ -5576,16 +5945,16 @@ func (s *GetInsightResultsOutput) SetInsightResults(v *InsightResults) *GetInsig type GetInsightsInput struct { _ struct{} `type:"structure"` - // The ARNS of the insights that you want to describe. + // The ARNs of the insights that you want to describe. InsightArns []*string `type:"list"` - // Indicates the maximum number of items that you want in the response. + // The maximum number of items that you want in the response. MaxResults *int64 `min:"1" type:"integer"` - // Paginates results. Set the value of this parameter to NULL on your first - // call to the GetInsights operation. For subsequent calls to the operation, - // fill nextToken in the request with the value of nextToken from the previous - // response to continue listing data. + // Paginates results. On your first call to the GetInsights operation, set the + // value of this parameter to NULL. For subsequent calls to the operation, fill + // nextToken in the request with the value of nextToken from the previous response + // to continue listing data. 
NextToken *string `type:"string"` } @@ -5743,8 +6112,8 @@ func (s *GetMasterAccountOutput) SetMaster(v *Invitation) *GetMasterAccountOutpu type GetMembersInput struct { _ struct{} `type:"structure"` - // A list of account IDs for the Security Hub member accounts on which you want - // to return the details. + // A list of account IDs for the Security Hub member accounts that you want + // to return the details for. // // AccountIds is a required field AccountIds []*string `type:"list" required:"true"` @@ -5785,8 +6154,8 @@ type GetMembersOutput struct { // A list of details about the Security Hub member accounts. Members []*Member `type:"list"` - // A list of account ID and email address pairs of the AWS accounts that could - // not be processed. + // A list of account ID and email address pairs of the AWS accounts that couldn't + // be processed. UnprocessedAccounts []*Result `type:"list"` } @@ -5812,7 +6181,7 @@ func (s *GetMembersOutput) SetUnprocessedAccounts(v []*Result) *GetMembersOutput return s } -// Includes details of the list of the findings that cannot be imported. +// Includes details of the list of the findings that can't be imported. type ImportFindingsError struct { _ struct{} `type:"structure"` @@ -5826,7 +6195,7 @@ type ImportFindingsError struct { // ErrorMessage is a required field ErrorMessage *string `type:"string" required:"true"` - // The id of the error made during the BatchImportFindings operation. + // The ID of the error made during the BatchImportFindings operation. // // Id is a required field Id *string `type:"string" required:"true"` @@ -5871,7 +6240,7 @@ type Insight struct { // Filters is a required field Filters *AwsSecurityFindingFilters `type:"structure" required:"true"` - // The attribute by which the insight's findings are grouped. This attribute + // The attribute that the insight's findings are grouped by. 
This attribute // is used as a findings aggregator for the purposes of viewing and managing // multiple related findings under a single operand. // @@ -5932,7 +6301,7 @@ type InsightResultValue struct { // Count is a required field Count *int64 `type:"integer" required:"true"` - // The value of the attribute by which the findings are grouped for the insight's + // The value of the attribute that the findings are grouped by for the insight // whose results are returned by the GetInsightResults operation. // // GroupByAttributeValue is a required field @@ -5965,7 +6334,7 @@ func (s *InsightResultValue) SetGroupByAttributeValue(v string) *InsightResultVa type InsightResults struct { _ struct{} `type:"structure"` - // The attribute by which the findings are grouped for the insight's whose results + // The attribute that the findings are grouped by for the insight whose results // are returned by the GetInsightResults operation. // // GroupByAttribute is a required field @@ -6011,15 +6380,15 @@ func (s *InsightResults) SetResultValues(v []*InsightResultValue) *InsightResult return s } -// The details of an invitation sent to an AWS account by the Security Hub master -// account. +// The details of an invitation that the Security Hub master account sent to +// an AWS account. type Invitation struct { _ struct{} `type:"structure"` - // The account ID of the master Security Hub account who sent the invitation. + // The account ID of the Security Hub master account that sent the invitation. AccountId *string `type:"string"` - // The ID of the invitation sent by the master Security Hub account. + // The ID of the invitation that the Security Hub master account sent. InvitationId *string `type:"string"` // The timestamp of when the invitation was sent. 
@@ -6090,8 +6459,8 @@ func (s *InviteMembersInput) SetAccountIds(v []*string) *InviteMembersInput { type InviteMembersOutput struct { _ struct{} `type:"structure"` - // A list of account ID and email address pairs of the AWS accounts that could - // not be processed. + // A list of account ID and email address pairs of the AWS accounts that couldn't + // be processed. UnprocessedAccounts []*Result `type:"list"` } @@ -6115,7 +6484,7 @@ func (s *InviteMembersOutput) SetUnprocessedAccounts(v []*Result) *InviteMembers type IpFilter struct { _ struct{} `type:"structure"` - // Finding's CIDR value. + // A finding's CIDR value. Cidr *string `type:"string"` } @@ -6162,11 +6531,11 @@ func (s *KeywordFilter) SetValue(v string) *KeywordFilter { type ListEnabledProductsForImportInput struct { _ struct{} `type:"structure"` - // Indicates the maximum number of items that you want in the response. + // The maximum number of items that you want in the response. MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` - // Paginates results. Set the value of this parameter to NULL on your first - // call to the ListEnabledProductsForImport operation. For subsequent calls + // Paginates results. On your first call to the ListEnabledProductsForImport + // operation, set the value of this parameter to NULL. For subsequent calls // to the operation, fill nextToken in the request with the value of NextToken // from the previous response to continue listing data. NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` @@ -6242,11 +6611,11 @@ func (s *ListEnabledProductsForImportOutput) SetProductSubscriptions(v []*string type ListInvitationsInput struct { _ struct{} `type:"structure"` - // Indicates the maximum number of items that you want in the response. + // The maximum number of items that you want in the response. 
MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` - // Paginates results. Set the value of this parameter to NULL on your first - // call to the ListInvitations operation. For subsequent calls to the operation, + // Paginates results. On your first call to the ListInvitations operation, set + // the value of this parameter to NULL. For subsequent calls to the operation, // fill nextToken in the request with the value of NextToken from the previous // response to continue listing data. NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` @@ -6322,16 +6691,16 @@ func (s *ListInvitationsOutput) SetNextToken(v string) *ListInvitationsOutput { type ListMembersInput struct { _ struct{} `type:"structure"` - // Indicates the maximum number of items that you want in the response. + // The maximum number of items that you want in the response. MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` // Paginates results. Set the value of this parameter to NULL on your first // call to the ListMembers operation. For subsequent calls to the operation, - // fill nextToken in the request with the value of NextToken from the previous + // fill nextToken in the request with the value of nextToken from the previous // response to continue listing data. NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` - // Specifies what member accounts the response includes based on their relationship + // Specifies which member accounts the response includes based on their relationship // status with the master account. The default value is TRUE. If onlyAssociated // is set to TRUE, the response includes member accounts whose relationship // status with the master is set to ENABLED or DISABLED. 
If onlyAssociated is @@ -6412,6 +6781,92 @@ func (s *ListMembersOutput) SetNextToken(v string) *ListMembersOutput { return s } +type ListProductSubscribersInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return. + MaxResults *int64 `location:"querystring" locationName:"MaxResults" min:"1" type:"integer"` + + // The token that is required for pagination. + NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` + + // The ARN of the product. + ProductArn *string `location:"querystring" locationName:"ProductArn" type:"string"` +} + +// String returns the string representation +func (s ListProductSubscribersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListProductSubscribersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListProductSubscribersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListProductSubscribersInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListProductSubscribersInput) SetMaxResults(v int64) *ListProductSubscribersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListProductSubscribersInput) SetNextToken(v string) *ListProductSubscribersInput { + s.NextToken = &v + return s +} + +// SetProductArn sets the ProductArn field's value. +func (s *ListProductSubscribersInput) SetProductArn(v string) *ListProductSubscribersInput { + s.ProductArn = &v + return s +} + +type ListProductSubscribersOutput struct { + _ struct{} `type:"structure"` + + // The token that is required for pagination. 
+ NextToken *string `type:"string"` + + // A list of account IDs that are subscribed to the product. + ProductSubscribers []*string `type:"list"` +} + +// String returns the string representation +func (s ListProductSubscribersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListProductSubscribersOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListProductSubscribersOutput) SetNextToken(v string) *ListProductSubscribersOutput { + s.NextToken = &v + return s +} + +// SetProductSubscribers sets the ProductSubscribers field's value. +func (s *ListProductSubscribersOutput) SetProductSubscribers(v []*string) *ListProductSubscribersOutput { + s.ProductSubscribers = v + return s +} + // A list of malware related to a finding. type Malware struct { _ struct{} `type:"structure"` @@ -6421,7 +6876,7 @@ type Malware struct { // Name is a required field Name *string `type:"string" required:"true"` - // The filesystem path of the malware that was observed. + // The file system path of the malware that was observed. Path *string `type:"string"` // The state of the malware that was observed. @@ -6482,8 +6937,8 @@ func (s *Malware) SetType(v string) *Malware { type MapFilter struct { _ struct{} `type:"structure"` - // Represents the condition to be applied to a key value when querying for findings - // with a map filter. + // The condition to be applied to a key value when querying for findings with + // a map filter. Comparison *string `type:"string" enum:"MapFilterComparison"` // The key of the map filter. @@ -6531,17 +6986,17 @@ type Member struct { // The email of a Security Hub member account. Email *string `type:"string"` - // Time stamp at which the member account was invited to Security Hub. + // The timestamp of when the member account was invited to Security Hub. 
InvitedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // The AWS account ID of the master Security Hub account to this member account. + // The AWS account ID of the Security Hub master account to this member account. MasterId *string `type:"string"` // The status of the relationship between the member account and its master // account. MemberStatus *string `type:"string"` - // Time stamp at which this member account was updated. + // The timestamp of when this member account was updated. UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` } @@ -6607,7 +7062,7 @@ type Network struct { // The destination port of network-related information about a finding. DestinationPort *int64 `type:"integer"` - // Indicates the direction of network traffic associated with a finding. + // The direction of network traffic associated with a finding. Direction *string `type:"string" enum:"NetworkDirection"` // The protocol of network-related information about a finding. @@ -6830,16 +7285,16 @@ func (s *NoteUpdate) SetUpdatedBy(v string) *NoteUpdate { type NumberFilter struct { _ struct{} `type:"structure"` - // Represents the "equal to" condition to be applied to a single field when - // querying for findings. + // The equal-to condition to be applied to a single field when querying for + // findings. Eq *float64 `type:"double"` - // Represents the "greater than equal" condition to be applied to a single field - // when querying for findings. + // The greater-than-equal condition to be applied to a single field when querying + // for findings. Gte *float64 `type:"double"` - // Represents the "less than equal" condition to be applied to a single field - // when querying for findings. + // The less-than-equal condition to be applied to a single field when querying + // for findings. 
Lte *float64 `type:"double"` } @@ -6940,8 +7395,96 @@ func (s *ProcessDetails) SetTerminatedAt(v string) *ProcessDetails { return s } -// Provides a recommendation on how to remediate the issue identified within -// a finding. +// Contains details about a product. +type Product struct { + _ struct{} `type:"structure"` + + // The URL used to activate the product. + ActivationUrl *string `type:"string"` + + // The categories assigned to the product. + Categories []*string `type:"list"` + + // The name of the company that provides the product. + CompanyName *string `type:"string"` + + // A description of the product. + Description *string `type:"string"` + + // The URL for the page that contains more information about the product. + MarketplaceUrl *string `type:"string"` + + // The ARN assigned to the product. + // + // ProductArn is a required field + ProductArn *string `type:"string" required:"true"` + + // The name of the product. + ProductName *string `type:"string"` + + // The resource policy asasociated with the product. + ProductSubscriptionResourcePolicy *string `type:"string"` +} + +// String returns the string representation +func (s Product) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Product) GoString() string { + return s.String() +} + +// SetActivationUrl sets the ActivationUrl field's value. +func (s *Product) SetActivationUrl(v string) *Product { + s.ActivationUrl = &v + return s +} + +// SetCategories sets the Categories field's value. +func (s *Product) SetCategories(v []*string) *Product { + s.Categories = v + return s +} + +// SetCompanyName sets the CompanyName field's value. +func (s *Product) SetCompanyName(v string) *Product { + s.CompanyName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Product) SetDescription(v string) *Product { + s.Description = &v + return s +} + +// SetMarketplaceUrl sets the MarketplaceUrl field's value. 
+func (s *Product) SetMarketplaceUrl(v string) *Product { + s.MarketplaceUrl = &v + return s +} + +// SetProductArn sets the ProductArn field's value. +func (s *Product) SetProductArn(v string) *Product { + s.ProductArn = &v + return s +} + +// SetProductName sets the ProductName field's value. +func (s *Product) SetProductName(v string) *Product { + s.ProductName = &v + return s +} + +// SetProductSubscriptionResourcePolicy sets the ProductSubscriptionResourcePolicy field's value. +func (s *Product) SetProductSubscriptionResourcePolicy(v string) *Product { + s.ProductSubscriptionResourcePolicy = &v + return s +} + +// A recommendation on how to remediate the issue identified in a finding. type Recommendation struct { _ struct{} `type:"structure"` @@ -6975,7 +7518,7 @@ func (s *Recommendation) SetUrl(v string) *Recommendation { return s } -// Related finding's details. +// A related finding's details. type RelatedFinding struct { _ struct{} `type:"structure"` @@ -7032,8 +7575,7 @@ func (s *RelatedFinding) SetProductArn(v string) *RelatedFinding { type Remediation struct { _ struct{} `type:"structure"` - // Provides a recommendation on how to remediate the issue identified within - // a finding. + // A recommendation on how to remediate the issue identified within a finding. Recommendation *Recommendation `type:"structure"` } @@ -7053,11 +7595,11 @@ func (s *Remediation) SetRecommendation(v *Recommendation) *Remediation { return s } -// A resource data type that describes a resource to which the finding refers. +// A resource data type that describes a resource that the finding refers to. type Resource struct { _ struct{} `type:"structure"` - // Provides additional details about the resource. + // Additional details about the resource. Details *ResourceDetails `type:"structure"` // The canonical identifier for the given resource type. 
@@ -7065,17 +7607,17 @@ type Resource struct { // Id is a required field Id *string `type:"string" required:"true"` - // The canonical AWS partition name to which the region is assigned. + // The canonical AWS partition name that the Region is assigned to. Partition *string `type:"string" enum:"Partition"` - // The canonical AWS external region name where this resource is located. + // The canonical AWS external Region name where this resource is located. Region *string `type:"string"` // A list of AWS tags associated with a resource at the time the finding was // processed. Tags map[string]*string `type:"map"` - // Specifies the type of the resource for which details are provided. + // The type of the resource that details are provided for. // // Type is a required field Type *string `type:"string" required:"true"` @@ -7143,24 +7685,24 @@ func (s *Resource) SetType(v string) *Resource { return s } -// Provides additional details about the resource. +// Additional details about the resource. type ResourceDetails struct { _ struct{} `type:"structure"` - // The details of an AWS EC2 instance. + // The details of an Amazon EC2 instance. AwsEc2Instance *AwsEc2InstanceDetails `type:"structure"` - // AWS IAM access key details related to a finding. + // IAM access key details related to a finding. AwsIamAccessKey *AwsIamAccessKeyDetails `type:"structure"` - // The details of an AWS S3 Bucket. + // The details of an Amazon S3 Bucket. AwsS3Bucket *AwsS3BucketDetails `type:"structure"` // Container details related to a finding. Container *ContainerDetails `type:"structure"` - // The details of a resource that does not have a specific sub-field for the - // resource type defined. + // The details of a resource that doesn't have a specific subfield for the resource + // type defined. 
Other map[string]*string `type:"map"` } @@ -7204,14 +7746,14 @@ func (s *ResourceDetails) SetOther(v map[string]*string) *ResourceDetails { return s } -// The account details that could not be processed. +// The account details that couldn't be processed. type Result struct { _ struct{} `type:"structure"` - // An ID of the AWS account that could not be processed. + // An ID of the AWS account that couldn't be processed. AccountId *string `type:"string"` - // The reason for why an account could not be processed. + // The reason for why an account couldn't be processed. ProcessingResult *string `type:"string"` } @@ -7246,7 +7788,7 @@ type Severity struct { // Normalized is a required field Normalized *int64 `type:"integer" required:"true"` - // The native severity as defined by the security findings provider's solution + // The native severity as defined by the security-findings provider's solution // that generated the finding. Product *float64 `type:"double"` } @@ -7325,7 +7867,7 @@ type StandardsSubscription struct { // The ARN of a standard. // - // In this release, Security Hub only supports the CIS AWS Foundations standard. + // In this release, Security Hub supports only the CIS AWS Foundations standard. // // Its ARN is arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0. // @@ -7435,8 +7977,7 @@ func (s *StandardsSubscriptionRequest) SetStandardsInput(v map[string]*string) * type StringFilter struct { _ struct{} `type:"structure"` - // Represents the condition to be applied to a string value when querying for - // findings. + // The condition to be applied to a string value when querying for findings. Comparison *string `type:"string" enum:"StringFilterComparison"` // The string filter value. @@ -7537,7 +8078,7 @@ func (s *ThreatIntelIndicator) SetValue(v string) *ThreatIntelIndicator { type UpdateFindingsInput struct { _ struct{} `type:"structure"` - // A collection of attributes that specify what findings you want to update. 
+ // A collection of attributes that specify which findings you want to update. // // Filters is a required field Filters *AwsSecurityFindingFilters `type:"structure" required:"true"` @@ -7777,8 +8318,8 @@ const ( ) const ( - // MapFilterComparisonContains is a MapFilterComparison enum value - MapFilterComparisonContains = "CONTAINS" + // MapFilterComparisonEquals is a MapFilterComparison enum value + MapFilterComparisonEquals = "EQUALS" ) const ( @@ -7828,15 +8369,15 @@ const ( // StandardsStatusDeleting is a StandardsStatus enum value StandardsStatusDeleting = "DELETING" + + // StandardsStatusIncomplete is a StandardsStatus enum value + StandardsStatusIncomplete = "INCOMPLETE" ) const ( // StringFilterComparisonEquals is a StringFilterComparison enum value StringFilterComparisonEquals = "EQUALS" - // StringFilterComparisonContains is a StringFilterComparison enum value - StringFilterComparisonContains = "CONTAINS" - // StringFilterComparisonPrefix is a StringFilterComparison enum value StringFilterComparisonPrefix = "PREFIX" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/securityhub/doc.go b/vendor/github.com/aws/aws-sdk-go/service/securityhub/doc.go index 3ba53ebde2e..6ae70604f0b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/securityhub/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/securityhub/doc.go @@ -4,13 +4,13 @@ // requests to AWS SecurityHub. // // AWS Security Hub provides you with a comprehensive view of your security -// state within AWS and your compliance with the security industry standards -// and best practices. Security Hub collects security data from across AWS accounts, +// state in AWS and your compliance with the security industry standards and +// best practices. Security Hub collects security data from across AWS accounts, // services, and supported third-party partners and helps you analyze your security // trends and identify the highest priority security issues. 
For more information, // see AWS Security Hub User Guide (https://docs.aws.amazon.com/securityhub/latest/userguide/what-is-securityhub.html). // -// Currently, AWS Security Hub is in Preview release. +// Important: AWS Security Hub is currently in Preview release. // // See https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/securityhub/errors.go b/vendor/github.com/aws/aws-sdk-go/service/securityhub/errors.go index 6c2941282c4..ea15ffb030d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/securityhub/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/securityhub/errors.go @@ -7,7 +7,7 @@ const ( // ErrCodeAccessDeniedException for service response error code // "AccessDeniedException". // - // You do not have permission to to perform the action specified in the request. + // You don't have permission to perform the action specified in the request. ErrCodeAccessDeniedException = "AccessDeniedException" // ErrCodeInternalException for service response error code @@ -19,14 +19,14 @@ const ( // ErrCodeInvalidAccessException for service response error code // "InvalidAccessException". // - // AWS Security Hub is not enabled for the account used to make this request. + // AWS Security Hub isn't enabled for the account used to make this request. ErrCodeInvalidAccessException = "InvalidAccessException" // ErrCodeInvalidInputException for service response error code // "InvalidInputException". // - // The request was rejected because an invalid or out-of-range value was supplied - // for an input parameter. + // The request was rejected because you supplied an invalid or out-of-range + // value for an input parameter. 
ErrCodeInvalidInputException = "InvalidInputException" // ErrCodeLimitExceededException for service response error code @@ -45,6 +45,6 @@ const ( // ErrCodeResourceNotFoundException for service response error code // "ResourceNotFoundException". // - // The request was rejected because the specified resource cannot be found. + // The request was rejected because we can't find the specified resource. ErrCodeResourceNotFoundException = "ResourceNotFoundException" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go index 3351c797b64..9be3f7e5ba6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go @@ -6232,6 +6232,90 @@ func (c *ServiceCatalog) ListServiceActionsForProvisioningArtifactPagesWithConte return p.Err() } +const opListStackInstancesForProvisionedProduct = "ListStackInstancesForProvisionedProduct" + +// ListStackInstancesForProvisionedProductRequest generates a "aws/request.Request" representing the +// client's request for the ListStackInstancesForProvisionedProduct operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListStackInstancesForProvisionedProduct for more information on using the ListStackInstancesForProvisionedProduct +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListStackInstancesForProvisionedProductRequest method. 
+// req, resp := client.ListStackInstancesForProvisionedProductRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListStackInstancesForProvisionedProduct +func (c *ServiceCatalog) ListStackInstancesForProvisionedProductRequest(input *ListStackInstancesForProvisionedProductInput) (req *request.Request, output *ListStackInstancesForProvisionedProductOutput) { + op := &request.Operation{ + Name: opListStackInstancesForProvisionedProduct, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListStackInstancesForProvisionedProductInput{} + } + + output = &ListStackInstancesForProvisionedProductOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListStackInstancesForProvisionedProduct API operation for AWS Service Catalog. +// +// Returns summary information about stack instances that are associated with +// the specified CFN_STACKSET type provisioned product. You can filter for stack +// instances that are associated with a specific AWS account name or region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Service Catalog's +// API operation ListStackInstancesForProvisionedProduct for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParametersException "InvalidParametersException" +// One or more parameters provided to the operation are not valid. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The specified resource was not found. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/ListStackInstancesForProvisionedProduct +func (c *ServiceCatalog) ListStackInstancesForProvisionedProduct(input *ListStackInstancesForProvisionedProductInput) (*ListStackInstancesForProvisionedProductOutput, error) { + req, out := c.ListStackInstancesForProvisionedProductRequest(input) + return out, req.Send() +} + +// ListStackInstancesForProvisionedProductWithContext is the same as ListStackInstancesForProvisionedProduct with the addition of +// the ability to pass a context and additional request options. +// +// See ListStackInstancesForProvisionedProduct for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceCatalog) ListStackInstancesForProvisionedProductWithContext(ctx aws.Context, input *ListStackInstancesForProvisionedProductInput, opts ...request.Option) (*ListStackInstancesForProvisionedProductOutput, error) { + req, out := c.ListStackInstancesForProvisionedProductRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListTagOptions = "ListTagOptions" // ListTagOptionsRequest generates a "aws/request.Request" representing the @@ -14745,6 +14829,114 @@ func (s *ListServiceActionsOutput) SetServiceActionSummaries(v []*ServiceActionS return s } +type ListStackInstancesForProvisionedProductInput struct { + _ struct{} `type:"structure"` + + // The language code. + // + // * en - English (default) + // + // * jp - Japanese + // + // * zh - Chinese + AcceptLanguage *string `type:"string"` + + // The maximum number of items to return with this call. + PageSize *int64 `type:"integer"` + + // The page token for the next set of results. 
To retrieve the first set of + // results, use null. + PageToken *string `type:"string"` + + // The identifier of the provisioned product. + // + // ProvisionedProductId is a required field + ProvisionedProductId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListStackInstancesForProvisionedProductInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStackInstancesForProvisionedProductInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListStackInstancesForProvisionedProductInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListStackInstancesForProvisionedProductInput"} + if s.ProvisionedProductId == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisionedProductId")) + } + if s.ProvisionedProductId != nil && len(*s.ProvisionedProductId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProvisionedProductId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptLanguage sets the AcceptLanguage field's value. +func (s *ListStackInstancesForProvisionedProductInput) SetAcceptLanguage(v string) *ListStackInstancesForProvisionedProductInput { + s.AcceptLanguage = &v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *ListStackInstancesForProvisionedProductInput) SetPageSize(v int64) *ListStackInstancesForProvisionedProductInput { + s.PageSize = &v + return s +} + +// SetPageToken sets the PageToken field's value. +func (s *ListStackInstancesForProvisionedProductInput) SetPageToken(v string) *ListStackInstancesForProvisionedProductInput { + s.PageToken = &v + return s +} + +// SetProvisionedProductId sets the ProvisionedProductId field's value. 
+func (s *ListStackInstancesForProvisionedProductInput) SetProvisionedProductId(v string) *ListStackInstancesForProvisionedProductInput { + s.ProvisionedProductId = &v + return s +} + +type ListStackInstancesForProvisionedProductOutput struct { + _ struct{} `type:"structure"` + + // The page token to use to retrieve the next set of results. If there are no + // additional results, this value is null. + NextPageToken *string `type:"string"` + + // List of stack instances. + StackInstances []*StackInstance `type:"list"` +} + +// String returns the string representation +func (s ListStackInstancesForProvisionedProductOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListStackInstancesForProvisionedProductOutput) GoString() string { + return s.String() +} + +// SetNextPageToken sets the NextPageToken field's value. +func (s *ListStackInstancesForProvisionedProductOutput) SetNextPageToken(v string) *ListStackInstancesForProvisionedProductOutput { + s.NextPageToken = &v + return s +} + +// SetStackInstances sets the StackInstances field's value. +func (s *ListStackInstancesForProvisionedProductOutput) SetStackInstances(v []*StackInstance) *ListStackInstancesForProvisionedProductOutput { + s.StackInstances = v + return s +} + // Filters to use when listing TagOptions. type ListTagOptionsFilters struct { _ struct{} `type:"structure"` @@ -17928,6 +18120,68 @@ func (s *ShareError) SetMessage(v string) *ShareError { return s } +// An AWS CloudFormation stack, in a specific account and region, that's part +// of a stack set operation. A stack instance is a reference to an attempted +// or actual stack in a given account within a given region. A stack instance +// can exist without a stack—for example, if the stack couldn't be created +// for some reason. A stack instance is associated with only one stack set. 
+// Each stack instance contains the ID of its associated stack set, as well +// as the ID of the actual stack and the stack status. +type StackInstance struct { + _ struct{} `type:"structure"` + + // The name of the AWS account that the stack instance is associated with. + Account *string `type:"string"` + + // The name of the AWS region that the stack instance is associated with. + Region *string `type:"string"` + + // The status of the stack instance, in terms of its synchronization with its + // associated stack set. + // + // * INOPERABLE: A DeleteStackInstances operation has failed and left the + // stack in an unstable state. Stacks in this state are excluded from further + // UpdateStackSet operations. You might need to perform a DeleteStackInstances + // operation, with RetainStacks set to true, to delete the stack instance, + // and then delete the stack manually. + // + // * OUTDATED: The stack isn't currently up to date with the stack set because + // either the associated stack failed during a CreateStackSet or UpdateStackSet + // operation, or the stack was part of a CreateStackSet or UpdateStackSet + // operation that failed or was stopped before the stack was created or updated. + // + // * CURRENT: The stack is currently up to date with the stack set. + StackInstanceStatus *string `type:"string" enum:"StackInstanceStatus"` +} + +// String returns the string representation +func (s StackInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackInstance) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *StackInstance) SetAccount(v string) *StackInstance { + s.Account = &v + return s +} + +// SetRegion sets the Region field's value. +func (s *StackInstance) SetRegion(v string) *StackInstance { + s.Region = &v + return s +} + +// SetStackInstanceStatus sets the StackInstanceStatus field's value. 
+func (s *StackInstance) SetStackInstanceStatus(v string) *StackInstance { + s.StackInstanceStatus = &v + return s +} + // Information about a tag. A tag is a key-value pair. Tags are propagated to // the resources created when provisioning a product. type Tag struct { @@ -19935,6 +20189,17 @@ const ( SortOrderDescending = "DESCENDING" ) +const ( + // StackInstanceStatusCurrent is a StackInstanceStatus enum value + StackInstanceStatusCurrent = "CURRENT" + + // StackInstanceStatusOutdated is a StackInstanceStatus enum value + StackInstanceStatusOutdated = "OUTDATED" + + // StackInstanceStatusInoperable is a StackInstanceStatus enum value + StackInstanceStatusInoperable = "INOPERABLE" +) + const ( // StackSetOperationTypeCreate is a StackSetOperationType enum value StackSetOperationTypeCreate = "CREATE" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go index 55626825364..56615369d33 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go @@ -59,8 +59,8 @@ func (c *SSM) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *requ // AddTagsToResource API operation for Amazon Simple Systems Manager (SSM). // // Adds or overwrites one or more tags for the specified resource. Tags are -// metadata that you can assign to your documents, managed instances, Maintenance -// Windows, Parameter Store parameters, and patch baselines. Tags enable you +// metadata that you can assign to your documents, managed instances, maintenance +// windows, Parameter Store parameters, and patch baselines. Tags enable you // to categorize your resources in different ways, for example, by purpose, // owner, or environment. Each tag consists of a key and an optional value, // both of which you define. 
For example, you could define a set of tags for @@ -273,7 +273,7 @@ func (c *SSM) CancelMaintenanceWindowExecutionRequest(input *CancelMaintenanceWi // CancelMaintenanceWindowExecution API operation for Amazon Simple Systems Manager (SSM). // -// Stops a Maintenance Window execution that is already in progress and cancels +// Stops a maintenance window execution that is already in progress and cancels // any tasks in the window that have not already starting running. (Tasks already // in progress will continue to completion.) // @@ -289,8 +289,8 @@ func (c *SSM) CancelMaintenanceWindowExecutionRequest(input *CancelMaintenanceWi // An error occurred on the server side. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -364,8 +364,8 @@ func (c *SSM) CreateActivationRequest(input *CreateActivationInput) (req *reques // Registers your on-premises server or virtual machine with Amazon EC2 so that // you can manage these resources using Run Command. An on-premises server or // virtual machine that has been registered with EC2 is called a managed instance. -// For more information about activations, see Setting Up Systems Manager in -// Hybrid Environments (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html). +// For more information about activations, see Setting Up AWS Systems Manager +// for Hybrid Environments (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -807,7 +807,7 @@ func (c *SSM) CreateMaintenanceWindowRequest(input *CreateMaintenanceWindowInput // CreateMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Creates a new Maintenance Window. +// Creates a new maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -823,7 +823,7 @@ func (c *SSM) CreateMaintenanceWindowRequest(input *CreateMaintenanceWindowInput // // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" // Error returned when the caller has exceeded the default resource limits. -// For example, too many Maintenance Windows or Patch baselines have been created. +// For example, too many maintenance windows or patch baselines have been created. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -916,7 +916,7 @@ func (c *SSM) CreatePatchBaselineRequest(input *CreatePatchBaselineInput) (req * // // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" // Error returned when the caller has exceeded the default resource limits. -// For example, too many Maintenance Windows or Patch baselines have been created. +// For example, too many maintenance windows or patch baselines have been created. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -1484,7 +1484,7 @@ func (c *SSM) DeleteMaintenanceWindowRequest(input *DeleteMaintenanceWindowInput // DeleteMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Deletes a Maintenance Window. 
+// Deletes a maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2075,7 +2075,7 @@ func (c *SSM) DeregisterTargetFromMaintenanceWindowRequest(input *DeregisterTarg // DeregisterTargetFromMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Removes a target from a Maintenance Window. +// Removes a target from a maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2086,8 +2086,8 @@ func (c *SSM) DeregisterTargetFromMaintenanceWindowRequest(input *DeregisterTarg // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -2165,7 +2165,7 @@ func (c *SSM) DeregisterTaskFromMaintenanceWindowRequest(input *DeregisterTaskFr // DeregisterTaskFromMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Removes a task from a Maintenance Window. +// Removes a task from a maintenance window. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2176,8 +2176,8 @@ func (c *SSM) DeregisterTaskFromMaintenanceWindowRequest(input *DeregisterTaskFr // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -3225,8 +3225,8 @@ func (c *SSM) DescribeEffectivePatchesForPatchBaselineRequest(input *DescribeEff // try again. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -3855,7 +3855,7 @@ func (c *SSM) DescribeInventoryDeletionsRequest(input *DescribeInventoryDeletion // An error occurred on the server side. // // * ErrCodeInvalidDeletionIdException "InvalidDeletionIdException" -// The ID specified for the delete operation does not exist or is not valide. +// The ID specified for the delete operation does not exist or is not valid. // Verify the ID and try again. 
// // * ErrCodeInvalidNextToken "InvalidNextToken" @@ -3928,7 +3928,7 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsRequest(input *De // DescribeMaintenanceWindowExecutionTaskInvocations API operation for Amazon Simple Systems Manager (SSM). // // Retrieves the individual task executions (one per target) for a particular -// task run as part of a Maintenance Window execution. +// task run as part of a maintenance window execution. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3939,8 +3939,8 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsRequest(input *De // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -4014,7 +4014,7 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTasksRequest(input *DescribeMain // DescribeMaintenanceWindowExecutionTasks API operation for Amazon Simple Systems Manager (SSM). // -// For a given Maintenance Window execution, lists the tasks that were run. +// For a given maintenance window execution, lists the tasks that were run. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4025,8 +4025,8 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTasksRequest(input *DescribeMain // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -4100,9 +4100,9 @@ func (c *SSM) DescribeMaintenanceWindowExecutionsRequest(input *DescribeMaintena // DescribeMaintenanceWindowExecutions API operation for Amazon Simple Systems Manager (SSM). // -// Lists the executions of a Maintenance Window. This includes information about -// when the Maintenance Window was scheduled to be active, and information about -// tasks registered and run with the Maintenance Window. +// Lists the executions of a maintenance window. This includes information about +// when the maintenance window was scheduled to be active, and information about +// tasks registered and run with the maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4181,7 +4181,7 @@ func (c *SSM) DescribeMaintenanceWindowScheduleRequest(input *DescribeMaintenanc // DescribeMaintenanceWindowSchedule API operation for Amazon Simple Systems Manager (SSM). // -// Retrieves information about upcoming executions of a Maintenance Window. +// Retrieves information about upcoming executions of a maintenance window. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4195,8 +4195,8 @@ func (c *SSM) DescribeMaintenanceWindowScheduleRequest(input *DescribeMaintenanc // An error occurred on the server side. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -4267,7 +4267,7 @@ func (c *SSM) DescribeMaintenanceWindowTargetsRequest(input *DescribeMaintenance // DescribeMaintenanceWindowTargets API operation for Amazon Simple Systems Manager (SSM). // -// Lists the targets registered with the Maintenance Window. +// Lists the targets registered with the maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4278,8 +4278,8 @@ func (c *SSM) DescribeMaintenanceWindowTargetsRequest(input *DescribeMaintenance // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). 
@@ -4353,7 +4353,7 @@ func (c *SSM) DescribeMaintenanceWindowTasksRequest(input *DescribeMaintenanceWi // DescribeMaintenanceWindowTasks API operation for Amazon Simple Systems Manager (SSM). // -// Lists the tasks in a Maintenance Window. +// Lists the tasks in a maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4364,8 +4364,8 @@ func (c *SSM) DescribeMaintenanceWindowTasksRequest(input *DescribeMaintenanceWi // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -4439,7 +4439,7 @@ func (c *SSM) DescribeMaintenanceWindowsRequest(input *DescribeMaintenanceWindow // DescribeMaintenanceWindows API operation for Amazon Simple Systems Manager (SSM). // -// Retrieves the Maintenance Windows in an AWS account. +// Retrieves the maintenance windows in an AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4518,7 +4518,7 @@ func (c *SSM) DescribeMaintenanceWindowsForTargetRequest(input *DescribeMaintena // DescribeMaintenanceWindowsForTarget API operation for Amazon Simple Systems Manager (SSM). // -// Retrieves information about the Maintenance Windows targets or tasks that +// Retrieves information about the maintenance window targets or tasks that // an instance is associated with. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -5313,7 +5313,7 @@ func (c *SSM) GetCommandInvocationRequest(input *GetCommandInvocationInput) (req // // * ErrCodeInvocationDoesNotExist "InvocationDoesNotExist" // The command ID and instance ID you specified did not match any invocations. -// Verify the command ID adn the instance ID and try again. +// Verify the command ID and the instance ID and try again. // // See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetCommandInvocation func (c *SSM) GetCommandInvocation(input *GetCommandInvocationInput) (*GetCommandInvocationOutput, error) { @@ -5565,6 +5565,11 @@ func (c *SSM) GetDeployablePatchSnapshotForInstanceRequest(input *GetDeployableP // Windows, AmazonLinux, RedhatEnterpriseLinux, and Ubuntu. // // * ErrCodeUnsupportedFeatureRequiredException "UnsupportedFeatureRequiredException" +// Microsoft application patching is only available on EC2 instances and Advanced +// Instances. To patch Microsoft applications on on-premises servers and VMs, +// you must enable Advanced Instances. For more information, see Using the Advanced-Instances +// Tier (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html) +// in the AWS Systems Manager User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDeployablePatchSnapshotForInstance func (c *SSM) GetDeployablePatchSnapshotForInstance(input *GetDeployablePatchSnapshotForInstanceInput) (*GetDeployablePatchSnapshotForInstanceOutput, error) { @@ -5902,7 +5907,7 @@ func (c *SSM) GetMaintenanceWindowRequest(input *GetMaintenanceWindowInput) (req // GetMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Retrieves a Maintenance Window. +// Retrieves a maintenance window. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5913,8 +5918,8 @@ func (c *SSM) GetMaintenanceWindowRequest(input *GetMaintenanceWindowInput) (req // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -5988,7 +5993,7 @@ func (c *SSM) GetMaintenanceWindowExecutionRequest(input *GetMaintenanceWindowEx // GetMaintenanceWindowExecution API operation for Amazon Simple Systems Manager (SSM). // -// Retrieves details about a specific task run as part of a Maintenance Window +// Retrieves details about a specific task run as part of a maintenance window // execution. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6000,8 +6005,8 @@ func (c *SSM) GetMaintenanceWindowExecutionRequest(input *GetMaintenanceWindowEx // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -6075,8 +6080,8 @@ func (c *SSM) GetMaintenanceWindowExecutionTaskRequest(input *GetMaintenanceWind // GetMaintenanceWindowExecutionTask API operation for Amazon Simple Systems Manager (SSM). 
// -// Retrieves the details about a specific task run as part of a Maintenance -// Window execution. +// Retrieves the details about a specific task run as part of a maintenance +// window execution. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6087,8 +6092,8 @@ func (c *SSM) GetMaintenanceWindowExecutionTaskRequest(input *GetMaintenanceWind // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -6163,7 +6168,7 @@ func (c *SSM) GetMaintenanceWindowExecutionTaskInvocationRequest(input *GetMaint // GetMaintenanceWindowExecutionTaskInvocation API operation for Amazon Simple Systems Manager (SSM). // // Retrieves a task invocation. A task invocation is a specific task running -// on a specific target. Maintenance Windows report status for all invocations. +// on a specific target. maintenance windows report status for all invocations. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6174,8 +6179,8 @@ func (c *SSM) GetMaintenanceWindowExecutionTaskInvocationRequest(input *GetMaint // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. 
+// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -6249,7 +6254,7 @@ func (c *SSM) GetMaintenanceWindowTaskRequest(input *GetMaintenanceWindowTaskInp // GetMaintenanceWindowTask API operation for Amazon Simple Systems Manager (SSM). // -// Lists the tasks in a Maintenance Window. +// Lists the tasks in a maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6260,8 +6265,8 @@ func (c *SSM) GetMaintenanceWindowTaskRequest(input *GetMaintenanceWindowTaskInp // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -6826,8 +6831,8 @@ func (c *SSM) GetPatchBaselineRequest(input *GetPatchBaselineInput) (req *reques // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). 
@@ -9045,8 +9050,8 @@ func (c *SSM) RegisterDefaultPatchBaselineRequest(input *RegisterDefaultPatchBas // try again. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -9135,8 +9140,8 @@ func (c *SSM) RegisterPatchBaselineForPatchGroupRequest(input *RegisterPatchBase // baseline that is already registered with a different patch baseline. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -9147,7 +9152,7 @@ func (c *SSM) RegisterPatchBaselineForPatchGroupRequest(input *RegisterPatchBase // // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" // Error returned when the caller has exceeded the default resource limits. -// For example, too many Maintenance Windows or Patch baselines have been created. +// For example, too many maintenance windows or patch baselines have been created. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). 
@@ -9221,7 +9226,7 @@ func (c *SSM) RegisterTargetWithMaintenanceWindowRequest(input *RegisterTargetWi // RegisterTargetWithMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Registers a target with a Maintenance Window. +// Registers a target with a maintenance window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9236,15 +9241,15 @@ func (c *SSM) RegisterTargetWithMaintenanceWindowRequest(input *RegisterTargetWi // don't match the original call to the API with the same idempotency token. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" // Error returned when the caller has exceeded the default resource limits. -// For example, too many Maintenance Windows or Patch baselines have been created. +// For example, too many maintenance windows or patch baselines have been created. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -9318,7 +9323,7 @@ func (c *SSM) RegisterTaskWithMaintenanceWindowRequest(input *RegisterTaskWithMa // RegisterTaskWithMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Adds a new task to a Maintenance Window. +// Adds a new task to a maintenance window. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9333,15 +9338,15 @@ func (c *SSM) RegisterTaskWithMaintenanceWindowRequest(input *RegisterTaskWithMa // don't match the original call to the API with the same idempotency token. // // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). // // * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" // Error returned when the caller has exceeded the default resource limits. -// For example, too many Maintenance Windows or Patch baselines have been created. +// For example, too many maintenance windows or patch baselines have been created. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -9628,8 +9633,8 @@ func (c *SSM) ResumeSessionRequest(input *ResumeSessionInput) (req *request.Requ // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). 
@@ -10305,8 +10310,8 @@ func (c *SSM) TerminateSessionRequest(input *TerminateSessionInput) (req *reques // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -10809,7 +10814,7 @@ func (c *SSM) UpdateMaintenanceWindowRequest(input *UpdateMaintenanceWindowInput // UpdateMaintenanceWindow API operation for Amazon Simple Systems Manager (SSM). // -// Updates an existing Maintenance Window. Only specified parameters are modified. +// Updates an existing maintenance window. Only specified parameters are modified. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10820,8 +10825,8 @@ func (c *SSM) UpdateMaintenanceWindowRequest(input *UpdateMaintenanceWindowInput // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -10895,7 +10900,7 @@ func (c *SSM) UpdateMaintenanceWindowTargetRequest(input *UpdateMaintenanceWindo // UpdateMaintenanceWindowTarget API operation for Amazon Simple Systems Manager (SSM). 
// -// Modifies the target of an existing Maintenance Window. You can't change the +// Modifies the target of an existing maintenance window. You can't change the // target type, but you can change the following: // // The target from being an ID target to a Tag target, or a Tag target to an @@ -10922,8 +10927,8 @@ func (c *SSM) UpdateMaintenanceWindowTargetRequest(input *UpdateMaintenanceWindo // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -10997,7 +11002,7 @@ func (c *SSM) UpdateMaintenanceWindowTaskRequest(input *UpdateMaintenanceWindowT // UpdateMaintenanceWindowTask API operation for Amazon Simple Systems Manager (SSM). // -// Modifies a task assigned to a Maintenance Window. You can't change the task +// Modifies a task assigned to a maintenance window. You can't change the task // type, but you can change the following values: // // * TaskARN. For example, you can change a RUN_COMMAND task from AWS-RunPowerShellScript @@ -11027,8 +11032,8 @@ func (c *SSM) UpdateMaintenanceWindowTaskRequest(input *UpdateMaintenanceWindowT // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. 
// // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -11213,8 +11218,8 @@ func (c *SSM) UpdatePatchBaselineRequest(input *UpdatePatchBaselineInput) (req * // // Returned Error Codes: // * ErrCodeDoesNotExistException "DoesNotExistException" -// Error returned when the ID specified for a resource, such as a Maintenance -// Window or Patch baseline, doesn't exist. +// Error returned when the ID specified for a resource, such as a maintenance +// window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -11471,7 +11476,7 @@ type AddTagsToResourceInput struct { // For the Document and Parameter values, use the name of the resource. // // The ManagedInstance type for this API action is only for on-premises managed - // instances. You must specify the the name of the managed instance in the following + // instances. You must specify the name of the managed instance in the following // format: mi-ID_number. For example, mi-1a2b3c4d5e6f. // // ResourceId is a required field @@ -11480,7 +11485,7 @@ type AddTagsToResourceInput struct { // Specifies the type of resource you are tagging. // // The ManagedInstance type for this API action is for on-premises managed instances. - // You must specify the the name of the managed instance in the following format: + // You must specify the name of the managed instance in the following format: // mi-ID_number. For example, mi-1a2b3c4d5e6f. // // ResourceType is a required field @@ -13265,7 +13270,7 @@ func (s CancelCommandOutput) GoString() string { type CancelMaintenanceWindowExecutionInput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window execution to stop. + // The ID of the maintenance window execution to stop. 
// // WindowExecutionId is a required field WindowExecutionId *string `min:"36" type:"string" required:"true"` @@ -13306,7 +13311,7 @@ func (s *CancelMaintenanceWindowExecutionInput) SetWindowExecutionId(v string) * type CancelMaintenanceWindowExecutionOutput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window execution that has been stopped. + // The ID of the maintenance window execution that has been stopped. WindowExecutionId *string `min:"36" type:"string"` } @@ -15389,13 +15394,13 @@ func (s *CreateDocumentOutput) SetDocumentDescription(v *DocumentDescription) *C type CreateMaintenanceWindowInput struct { _ struct{} `type:"structure"` - // Enables a Maintenance Window task to run on managed instances, even if you + // Enables a maintenance window task to run on managed instances, even if you // have not registered those instances as targets. If enabled, then you must // specify the unregistered instances (by instance ID) when you register a task - // with the Maintenance Window + // with the maintenance window. // // If you don't enable this option, then you must specify previously-registered - // targets when you register a task with the Maintenance Window. + // targets when you register a task with the maintenance window. // // AllowUnassociatedTargets is a required field AllowUnassociatedTargets *bool `type:"boolean" required:"true"` @@ -15403,50 +15408,50 @@ type CreateMaintenanceWindowInput struct { // User-provided idempotency token. ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` - // The number of hours before the end of the Maintenance Window that Systems + // The number of hours before the end of the maintenance window that Systems // Manager stops scheduling new tasks for execution. // // Cutoff is a required field Cutoff *int64 `type:"integer" required:"true"` - // An optional description for the Maintenance Window. 
We recommend specifying - // a description to help you organize your Maintenance Windows. + // An optional description for the maintenance window. We recommend specifying + // a description to help you organize your maintenance windows. Description *string `min:"1" type:"string" sensitive:"true"` - // The duration of the Maintenance Window in hours. + // The duration of the maintenance window in hours. // // Duration is a required field Duration *int64 `min:"1" type:"integer" required:"true"` - // The date and time, in ISO-8601 Extended format, for when you want the Maintenance - // Window to become inactive. EndDate allows you to set a date and time in the - // future when the Maintenance Window will no longer run. + // The date and time, in ISO-8601 Extended format, for when you want the maintenance + // window to become inactive. EndDate allows you to set a date and time in the + // future when the maintenance window will no longer run. EndDate *string `type:"string"` - // The name of the Maintenance Window. + // The name of the maintenance window. // // Name is a required field Name *string `min:"3" type:"string" required:"true"` - // The schedule of the Maintenance Window in the form of a cron or rate expression. + // The schedule of the maintenance window in the form of a cron or rate expression. // // Schedule is a required field Schedule *string `min:"1" type:"string" required:"true"` - // The time zone that the scheduled Maintenance Window executions are based + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database // (https://www.iana.org/time-zones) on the IANA website. ScheduleTimezone *string `type:"string"` - // The date and time, in ISO-8601 Extended format, for when you want the Maintenance - // Window to become active. 
StartDate allows you to delay activation of the - // Maintenance Window until the specified future date. + // The date and time, in ISO-8601 Extended format, for when you want the maintenance + // window to become active. StartDate allows you to delay activation of the + // maintenance window until the specified future date. StartDate *string `type:"string"` // Optional metadata that you assign to a resource. Tags enable you to categorize // a resource in different ways, such as by purpose, owner, or environment. - // For example, you might want to tag a Maintenance Window to identify the type + // For example, you might want to tag a maintenance window to identify the type // of tasks it will run, the types of targets, and the environment it will run // in. In this case, you could specify the following key name/value pairs: // @@ -15456,7 +15461,7 @@ type CreateMaintenanceWindowInput struct { // // * Key=Environment,Value=Production // - // To add tags to an existing Maintenance Window, use the AddTagsToResource + // To add tags to an existing maintenance window, use the AddTagsToResource // action. Tags []*Tag `type:"list"` } @@ -15590,7 +15595,7 @@ func (s *CreateMaintenanceWindowInput) SetTags(v []*Tag) *CreateMaintenanceWindo type CreateMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // The ID of the created Maintenance Window. + // The ID of the created maintenance window. WindowId *string `min:"20" type:"string"` } @@ -16110,7 +16115,7 @@ type DeleteInventoryInput struct { // DisableSchema: If you choose this option, the system ignores all inventory // data for the specified version, and any earlier versions. To enable this // schema again, you must call the PutInventory action for a version greater - // than the disbled version. + // than the disabled version. // // DeleteSchema: This option deletes the specified custom type from the Inventory // service. You can recreate the schema later, if you want. 
@@ -16225,7 +16230,7 @@ func (s *DeleteInventoryOutput) SetTypeName(v string) *DeleteInventoryOutput { type DeleteMaintenanceWindowInput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window to delete. + // The ID of the maintenance window to delete. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -16266,7 +16271,7 @@ func (s *DeleteMaintenanceWindowInput) SetWindowId(v string) *DeleteMaintenanceW type DeleteMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // The ID of the deleted Maintenance Window. + // The ID of the deleted maintenance window. WindowId *string `min:"20" type:"string"` } @@ -16682,10 +16687,10 @@ type DeregisterTargetFromMaintenanceWindowInput struct { // The system checks if the target is being referenced by a task. If the target // is being referenced, the system returns an error and does not deregister - // the target from the Maintenance Window. + // the target from the maintenance window. Safe *bool `type:"boolean"` - // The ID of the Maintenance Window the target should be removed from. + // The ID of the maintenance window the target should be removed from. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -16749,7 +16754,7 @@ func (s *DeregisterTargetFromMaintenanceWindowInput) SetWindowTargetId(v string) type DeregisterTargetFromMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window the target was removed from. + // The ID of the maintenance window the target was removed from. WindowId *string `min:"20" type:"string"` // The ID of the removed target definition. @@ -16781,12 +16786,12 @@ func (s *DeregisterTargetFromMaintenanceWindowOutput) SetWindowTargetId(v string type DeregisterTaskFromMaintenanceWindowInput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window the task should be removed from. 
+ // The ID of the maintenance window the task should be removed from. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` - // The ID of the task to remove from the Maintenance Window. + // The ID of the task to remove from the maintenance window. // // WindowTaskId is a required field WindowTaskId *string `min:"36" type:"string" required:"true"` @@ -16839,10 +16844,10 @@ func (s *DeregisterTaskFromMaintenanceWindowInput) SetWindowTaskId(v string) *De type DeregisterTaskFromMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window the task was removed from. + // The ID of the maintenance window the task was removed from. WindowId *string `min:"20" type:"string"` - // The ID of the task removed from the Maintenance Window. + // The ID of the task removed from the maintenance window. WindowTaskId *string `min:"36" type:"string"` } @@ -18681,13 +18686,13 @@ type DescribeMaintenanceWindowExecutionTaskInvocationsInput struct { // a previous call.) NextToken *string `type:"string"` - // The ID of the specific task in the Maintenance Window task that should be + // The ID of the specific task in the maintenance window task that should be // retrieved. // // TaskId is a required field TaskId *string `min:"36" type:"string" required:"true"` - // The ID of the Maintenance Window execution the task is part of. + // The ID of the maintenance window execution the task is part of. // // WindowExecutionId is a required field WindowExecutionId *string `min:"36" type:"string" required:"true"` @@ -18818,7 +18823,7 @@ type DescribeMaintenanceWindowExecutionTasksInput struct { // a previous call.) NextToken *string `type:"string"` - // The ID of the Maintenance Window execution whose task executions should be + // The ID of the maintenance window execution whose task executions should be // retrieved. 
// // WindowExecutionId is a required field @@ -18943,7 +18948,7 @@ type DescribeMaintenanceWindowExecutionsInput struct { // a previous call.) NextToken *string `type:"string"` - // The ID of the Maintenance Window whose executions should be retrieved. + // The ID of the maintenance window whose executions should be retrieved. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -19019,7 +19024,7 @@ type DescribeMaintenanceWindowExecutionsOutput struct { // items to return, the string is empty. NextToken *string `type:"string"` - // Information about the Maintenance Windows execution. + // Information about the maintenance window executions. WindowExecutions []*MaintenanceWindowExecution `type:"list"` } @@ -19048,8 +19053,8 @@ func (s *DescribeMaintenanceWindowExecutionsOutput) SetWindowExecutions(v []*Mai type DescribeMaintenanceWindowScheduleInput struct { _ struct{} `type:"structure"` - // Filters used to limit the range of results. For example, you can limit Maintenance - // Window executions to only those scheduled before or after a certain date + // Filters used to limit the range of results. For example, you can limit maintenance + // window executions to only those scheduled before or after a certain date // and time. Filters []*PatchOrchestratorFilter `type:"list"` @@ -19069,7 +19074,7 @@ type DescribeMaintenanceWindowScheduleInput struct { // The instance ID or key/value pair to retrieve information about. Targets []*Target `type:"list"` - // The ID of the Maintenance Window to retrieve information about. + // The ID of the maintenance window to retrieve information about. WindowId *string `min:"20" type:"string"` } @@ -19162,7 +19167,7 @@ type DescribeMaintenanceWindowScheduleOutput struct { // next call.) 
NextToken *string `type:"string"` - // Information about Maintenance Window executions scheduled for the specified + // Information about maintenance window executions scheduled for the specified // time range. ScheduledWindowExecutions []*ScheduledWindowExecution `type:"list"` } @@ -19205,7 +19210,7 @@ type DescribeMaintenanceWindowTargetsInput struct { // a previous call.) NextToken *string `type:"string"` - // The ID of the Maintenance Window whose targets should be retrieved. + // The ID of the maintenance window whose targets should be retrieved. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -19281,7 +19286,7 @@ type DescribeMaintenanceWindowTargetsOutput struct { // items to return, the string is empty. NextToken *string `type:"string"` - // Information about the targets in the Maintenance Window. + // Information about the targets in the maintenance window. Targets []*MaintenanceWindowTarget `type:"list"` } @@ -19323,7 +19328,7 @@ type DescribeMaintenanceWindowTasksInput struct { // a previous call.) NextToken *string `type:"string"` - // The ID of the Maintenance Window whose tasks should be retrieved. + // The ID of the maintenance window whose tasks should be retrieved. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -19399,7 +19404,7 @@ type DescribeMaintenanceWindowTasksOutput struct { // items to return, the string is empty. NextToken *string `type:"string"` - // Information about the tasks in the Maintenance Window. + // Information about the tasks in the maintenance window. Tasks []*MaintenanceWindowTask `type:"list"` } @@ -19519,7 +19524,7 @@ type DescribeMaintenanceWindowsForTargetOutput struct { // next call.) NextToken *string `type:"string"` - // Information about the Maintenance Window targets and tasks an instance is + // Information about the maintenance window targets and tasks an instance is // associated with. 
WindowIdentities []*MaintenanceWindowIdentityForTarget `type:"list"` } @@ -19549,8 +19554,8 @@ func (s *DescribeMaintenanceWindowsForTargetOutput) SetWindowIdentities(v []*Mai type DescribeMaintenanceWindowsInput struct { _ struct{} `type:"structure"` - // Optional filters used to narrow down the scope of the returned Maintenance - // Windows. Supported filter keys are Name and Enabled. + // Optional filters used to narrow down the scope of the returned maintenance + // windows. Supported filter keys are Name and Enabled. Filters []*MaintenanceWindowFilter `type:"list"` // The maximum number of items to return for this call. The call also returns @@ -19621,7 +19626,7 @@ type DescribeMaintenanceWindowsOutput struct { // items to return, the string is empty. NextToken *string `type:"string"` - // Information about the Maintenance Windows. + // Information about the maintenance windows. WindowIdentities []*MaintenanceWindowIdentity `type:"list"` } @@ -22150,7 +22155,7 @@ func (s *GetInventorySchemaOutput) SetSchemas(v []*InventoryItemSchema) *GetInve type GetMaintenanceWindowExecutionInput struct { _ struct{} `type:"structure"` - // The ID of the Maintenance Window execution that includes the task. + // The ID of the maintenance window execution that includes the task. // // WindowExecutionId is a required field WindowExecutionId *string `min:"36" type:"string" required:"true"` @@ -22191,22 +22196,22 @@ func (s *GetMaintenanceWindowExecutionInput) SetWindowExecutionId(v string) *Get type GetMaintenanceWindowExecutionOutput struct { _ struct{} `type:"structure"` - // The time the Maintenance Window finished running. + // The time the maintenance window finished running. EndTime *time.Time `type:"timestamp"` - // The time the Maintenance Window started running. + // The time the maintenance window started running. StartTime *time.Time `type:"timestamp"` - // The status of the Maintenance Window execution. + // The status of the maintenance window execution. 
Status *string `type:"string" enum:"MaintenanceWindowExecutionStatus"` // The details explaining the Status. Only available for certain status values. StatusDetails *string `type:"string"` - // The ID of the task executions from the Maintenance Window execution. + // The ID of the task executions from the maintenance window execution. TaskIds []*string `type:"list"` - // The ID of the Maintenance Window execution. + // The ID of the maintenance window execution. WindowExecutionId *string `min:"36" type:"string"` } @@ -22259,13 +22264,13 @@ func (s *GetMaintenanceWindowExecutionOutput) SetWindowExecutionId(v string) *Ge type GetMaintenanceWindowExecutionTaskInput struct { _ struct{} `type:"structure"` - // The ID of the specific task execution in the Maintenance Window task that + // The ID of the specific task execution in the maintenance window task that // should be retrieved. // // TaskId is a required field TaskId *string `min:"36" type:"string" required:"true"` - // The ID of the Maintenance Window execution that includes the task. + // The ID of the maintenance window execution that includes the task. // // WindowExecutionId is a required field WindowExecutionId *string `min:"36" type:"string" required:"true"` @@ -22323,13 +22328,13 @@ type GetMaintenanceWindowExecutionTaskInvocationInput struct { // InvocationId is a required field InvocationId *string `min:"36" type:"string" required:"true"` - // The ID of the specific task in the Maintenance Window task that should be + // The ID of the specific task in the maintenance window task that should be // retrieved. // // TaskId is a required field TaskId *string `min:"36" type:"string" required:"true"` - // The ID of the Maintenance Window execution for which the task is a part. + // The ID of the maintenance window execution for which the task is a part. 
// // WindowExecutionId is a required field WindowExecutionId *string `min:"36" type:"string" required:"true"` @@ -22404,7 +22409,7 @@ type GetMaintenanceWindowExecutionTaskInvocationOutput struct { InvocationId *string `min:"36" type:"string"` // User-provided value to be included in any CloudWatch events raised while - // running tasks for these targets in this Maintenance Window. + // running tasks for these targets in this maintenance window. OwnerInformation *string `min:"1" type:"string" sensitive:"true"` // The parameters used at the time that the task ran. @@ -22423,14 +22428,14 @@ type GetMaintenanceWindowExecutionTaskInvocationOutput struct { // The task execution ID. TaskExecutionId *string `min:"36" type:"string"` - // Retrieves the task type for a Maintenance Window. Task types include the + // Retrieves the task type for a maintenance window. Task types include the // following: LAMBDA, STEP_FUNCTION, AUTOMATION, RUN_COMMAND. TaskType *string `type:"string" enum:"MaintenanceWindowTaskType"` - // The Maintenance Window execution ID. + // The maintenance window execution ID. WindowExecutionId *string `min:"36" type:"string"` - // The Maintenance Window target ID. + // The maintenance window target ID. WindowTargetId *string `type:"string"` } @@ -22547,7 +22552,7 @@ type GetMaintenanceWindowExecutionTaskOutput struct { // The ARN of the task that ran. TaskArn *string `min:"1" type:"string"` - // The ID of the specific task execution in the Maintenance Window task that + // The ID of the specific task execution in the maintenance window task that // was retrieved. TaskExecutionId *string `min:"36" type:"string"` @@ -22556,7 +22561,7 @@ type GetMaintenanceWindowExecutionTaskOutput struct { // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. 
For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // The map has the following format: // @@ -22568,7 +22573,7 @@ type GetMaintenanceWindowExecutionTaskOutput struct { // The type of task that was run. Type *string `type:"string" enum:"MaintenanceWindowTaskType"` - // The ID of the Maintenance Window execution that includes the task. + // The ID of the maintenance window execution that includes the task. WindowExecutionId *string `min:"36" type:"string"` } @@ -22663,7 +22668,7 @@ func (s *GetMaintenanceWindowExecutionTaskOutput) SetWindowExecutionId(v string) type GetMaintenanceWindowInput struct { _ struct{} `type:"structure"` - // The ID of the desired Maintenance Window. + // The ID of the maintenance window for which you want to retrieve information. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -22704,56 +22709,56 @@ func (s *GetMaintenanceWindowInput) SetWindowId(v string) *GetMaintenanceWindowI type GetMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // Whether targets must be registered with the Maintenance Window before tasks + // Whether targets must be registered with the maintenance window before tasks // can be defined for those targets. AllowUnassociatedTargets *bool `type:"boolean"` - // The date the Maintenance Window was created. + // The date the maintenance window was created. CreatedDate *time.Time `type:"timestamp"` - // The number of hours before the end of the Maintenance Window that Systems + // The number of hours before the end of the maintenance window that Systems // Manager stops scheduling new tasks for execution. Cutoff *int64 `type:"integer"` - // The description of the Maintenance Window. + // The description of the maintenance window. 
Description *string `min:"1" type:"string" sensitive:"true"` - // The duration of the Maintenance Window in hours. + // The duration of the maintenance window in hours. Duration *int64 `min:"1" type:"integer"` - // Whether the Maintenance Windows is enabled. + // Indicates whether the maintenance window is enabled. Enabled *bool `type:"boolean"` - // The date and time, in ISO-8601 Extended format, for when the Maintenance - // Window is scheduled to become inactive. The Maintenance Window will not run + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become inactive. The maintenance window will not run // after this specified time. EndDate *string `type:"string"` - // The date the Maintenance Window was last modified. + // The date the maintenance window was last modified. ModifiedDate *time.Time `type:"timestamp"` - // The name of the Maintenance Window. + // The name of the maintenance window. Name *string `min:"3" type:"string"` - // The next time the Maintenance Window will actually run, taking into account - // any specified times for the Maintenance Window to become active or inactive. + // The next time the maintenance window will actually run, taking into account + // any specified times for the maintenance window to become active or inactive. NextExecutionTime *string `type:"string"` - // The schedule of the Maintenance Window in the form of a cron or rate expression. + // The schedule of the maintenance window in the form of a cron or rate expression. Schedule *string `min:"1" type:"string"` - // The time zone that the scheduled Maintenance Window executions are based + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database // (https://www.iana.org/time-zones) on the IANA website. 
ScheduleTimezone *string `type:"string"` - // The date and time, in ISO-8601 Extended format, for when the Maintenance - // Window is scheduled to become active. The Maintenance Window will not run + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become active. The maintenance window will not run // before this specified time. StartDate *string `type:"string"` - // The ID of the created Maintenance Window. + // The ID of the created maintenance window. WindowId *string `min:"20" type:"string"` } @@ -22854,12 +22859,12 @@ func (s *GetMaintenanceWindowOutput) SetWindowId(v string) *GetMaintenanceWindow type GetMaintenanceWindowTaskInput struct { _ struct{} `type:"structure"` - // The Maintenance Window ID that includes the task to retrieve. + // The maintenance window ID that includes the task to retrieve. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` - // The Maintenance Window task ID to retrieve. + // The maintenance window task ID to retrieve. // // WindowTaskId is a required field WindowTaskId *string `min:"36" type:"string" required:"true"` @@ -22920,7 +22925,7 @@ type GetMaintenanceWindowTaskOutput struct { // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. LoggingInfo *LoggingInfo `type:"structure"` // The maximum number of targets allowed to run this task in parallel. @@ -22936,7 +22941,8 @@ type GetMaintenanceWindowTaskOutput struct { // priority. Tasks that have the same priority are scheduled in parallel. 
Priority *int64 `type:"integer"` - // The IAM service role to assume during task execution. + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for maintenance window Run Command tasks. ServiceRoleArn *string `type:"string"` // The targets where the task should run. @@ -22956,16 +22962,16 @@ type GetMaintenanceWindowTaskOutput struct { // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` // The type of task to run. TaskType *string `type:"string" enum:"MaintenanceWindowTaskType"` - // The retrieved Maintenance Window ID. + // The retrieved maintenance window ID. WindowId *string `min:"20" type:"string"` - // The retrieved Maintenance Window task ID. + // The retrieved maintenance window task ID. WindowTaskId *string `min:"36" type:"string"` } @@ -23444,8 +23450,8 @@ func (s *GetParametersInput) SetWithDecryption(v bool) *GetParametersInput { type GetParametersOutput struct { _ struct{} `type:"structure"` - // A list of parameters that are not formatted correctly or do not run when - // executed. + // A list of parameters that are not formatted correctly or do not run during + // an execution. InvalidParameters []*string `min:"1" type:"list"` // A list of details for a parameter. @@ -23991,7 +23997,7 @@ type InstanceAssociationStatusInfo struct { // Detailed status information about the instance association. DetailedStatus *string `type:"string"` - // The association document verions. 
+ // The association document versions. DocumentVersion *string `type:"string"` // An error code returned by the request to create the association. @@ -24780,7 +24786,7 @@ type InventoryDeletionStatusItem struct { DeletionStartTime *time.Time `type:"timestamp"` // Information about the delete operation. For more information about this summary, - // see Understanding the Delete Inventory Summary (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-delete.html#sysman-inventory-delete-summary) + // see Understanding the Delete Inventory Summary (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-custom.html#sysman-inventory-delete) // in the AWS Systems Manager User Guide. DeletionSummary *InventoryDeletionSummary `type:"structure"` @@ -26816,7 +26822,7 @@ func (s *ListTagsForResourceOutput) SetTagList(v []*Tag) *ListTagsForResourceOut // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. type LoggingInfo struct { _ struct{} `type:"structure"` @@ -26899,12 +26905,12 @@ type MaintenanceWindowAutomationParameters struct { // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // TaskParameters has been deprecated. 
To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // For AUTOMATION task types, Systems Manager ignores any values specified for // these parameters. @@ -26946,7 +26952,7 @@ func (s *MaintenanceWindowAutomationParameters) SetParameters(v map[string][]*st return s } -// Describes the information about an execution of a Maintenance Window. +// Describes the information about an execution of a maintenance window. type MaintenanceWindowExecution struct { _ struct{} `type:"structure"` @@ -26962,10 +26968,10 @@ type MaintenanceWindowExecution struct { // The details explaining the Status. Only available for certain status values. StatusDetails *string `type:"string"` - // The ID of the Maintenance Window execution. + // The ID of the maintenance window execution. WindowExecutionId *string `min:"36" type:"string"` - // The ID of the Maintenance Window. + // The ID of the maintenance window. WindowId *string `min:"20" type:"string"` } @@ -27015,7 +27021,7 @@ func (s *MaintenanceWindowExecution) SetWindowId(v string) *MaintenanceWindowExe return s } -// Information about a task execution performed as part of a Maintenance Window +// Information about a task execution performed as part of a maintenance window // execution. type MaintenanceWindowExecutionTaskIdentity struct { _ struct{} `type:"structure"` @@ -27036,13 +27042,13 @@ type MaintenanceWindowExecutionTaskIdentity struct { // The ARN of the task that ran. TaskArn *string `min:"1" type:"string"` - // The ID of the specific task execution in the Maintenance Window execution. + // The ID of the specific task execution in the maintenance window execution. 
TaskExecutionId *string `min:"36" type:"string"` // The type of task that ran. TaskType *string `type:"string" enum:"MaintenanceWindowTaskType"` - // The ID of the Maintenance Window execution that ran the task. + // The ID of the maintenance window execution that ran the task. WindowExecutionId *string `min:"36" type:"string"` } @@ -27105,7 +27111,7 @@ func (s *MaintenanceWindowExecutionTaskIdentity) SetWindowExecutionId(v string) } // Describes the information about a task invocation for a particular target -// as part of a task execution performed as part of a Maintenance Window execution. +// as part of a task execution performed as part of a maintenance window execution. type MaintenanceWindowExecutionTaskInvocationIdentity struct { _ struct{} `type:"structure"` @@ -27120,7 +27126,7 @@ type MaintenanceWindowExecutionTaskInvocationIdentity struct { InvocationId *string `min:"36" type:"string"` // User-provided value that was specified when the target was registered with - // the Maintenance Window. This was also included in any CloudWatch events raised + // the maintenance window. This was also included in any CloudWatch events raised // during the task invocation. OwnerInformation *string `min:"1" type:"string" sensitive:"true"` @@ -27137,16 +27143,16 @@ type MaintenanceWindowExecutionTaskInvocationIdentity struct { // for certain Status values. StatusDetails *string `type:"string"` - // The ID of the specific task execution in the Maintenance Window execution. + // The ID of the specific task execution in the maintenance window execution. TaskExecutionId *string `min:"36" type:"string"` // The task type. TaskType *string `type:"string" enum:"MaintenanceWindowTaskType"` - // The ID of the Maintenance Window execution that ran the task. + // The ID of the maintenance window execution that ran the task. 
WindowExecutionId *string `min:"36" type:"string"` - // The ID of the target definition in this Maintenance Window the invocation + // The ID of the target definition in this maintenance window the invocation // was performed for. WindowTargetId *string `type:"string"` } @@ -27279,46 +27285,46 @@ func (s *MaintenanceWindowFilter) SetValues(v []*string) *MaintenanceWindowFilte return s } -// Information about the Maintenance Window. +// Information about the maintenance window. type MaintenanceWindowIdentity struct { _ struct{} `type:"structure"` - // The number of hours before the end of the Maintenance Window that Systems + // The number of hours before the end of the maintenance window that Systems // Manager stops scheduling new tasks for execution. Cutoff *int64 `type:"integer"` - // A description of the Maintenance Window. + // A description of the maintenance window. Description *string `min:"1" type:"string" sensitive:"true"` - // The duration of the Maintenance Window in hours. + // The duration of the maintenance window in hours. Duration *int64 `min:"1" type:"integer"` - // Whether the Maintenance Window is enabled. + // Indicates whether the maintenance window is enabled. Enabled *bool `type:"boolean"` - // The date and time, in ISO-8601 Extended format, for when the Maintenance - // Window is scheduled to become inactive. + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become inactive. EndDate *string `type:"string"` - // The name of the Maintenance Window. + // The name of the maintenance window. Name *string `min:"3" type:"string"` - // The next time the Maintenance Window will actually run, taking into account - // any specified times for the Maintenance Window to become active or inactive. + // The next time the maintenance window will actually run, taking into account + // any specified times for the maintenance window to become active or inactive. 
NextExecutionTime *string `type:"string"` - // The schedule of the Maintenance Window in the form of a cron or rate expression. + // The schedule of the maintenance window in the form of a cron or rate expression. Schedule *string `min:"1" type:"string"` - // The time zone that the scheduled Maintenance Window executions are based + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. ScheduleTimezone *string `type:"string"` - // The date and time, in ISO-8601 Extended format, for when the Maintenance - // Window is scheduled to become active. + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become active. StartDate *string `type:"string"` - // The ID of the Maintenance Window. + // The ID of the maintenance window. WindowId *string `min:"20" type:"string"` } @@ -27398,14 +27404,14 @@ func (s *MaintenanceWindowIdentity) SetWindowId(v string) *MaintenanceWindowIden return s } -// The Maintenance Window to which the specified target belongs. +// The maintenance window to which the specified target belongs. type MaintenanceWindowIdentityForTarget struct { _ struct{} `type:"structure"` - // The name of the Maintenance Window. + // The name of the maintenance window. Name *string `min:"3" type:"string"` - // The ID of the Maintenance Window. + // The ID of the maintenance window. WindowId *string `min:"20" type:"string"` } @@ -27439,12 +27445,12 @@ func (s *MaintenanceWindowIdentityForTarget) SetWindowId(v string) *MaintenanceW // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. 
+// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // For Lambda tasks, Systems Manager ignores any values specified for TaskParameters // and LoggingInfo. @@ -27520,19 +27526,19 @@ func (s *MaintenanceWindowLambdaParameters) SetQualifier(v string) *MaintenanceW // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // For Run Command tasks, Systems Manager uses specified values for TaskParameters // and LoggingInfo only if no values are specified for TaskInvocationParameters. type MaintenanceWindowRunCommandParameters struct { _ struct{} `type:"structure"` - // Information about the command(s) to run. + // Information about the commands to run. 
Comment *string `type:"string"` // The SHA-256 or SHA-1 hash created by the system when the document was created. @@ -27555,7 +27561,8 @@ type MaintenanceWindowRunCommandParameters struct { // The parameters for the RUN_COMMAND task execution. Parameters map[string][]*string `type:"map"` - // The IAM service role to assume during task execution. + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for maintenance window Run Command tasks. ServiceRoleArn *string `type:"string"` // If this time is reached and the command has not already started running, @@ -27651,12 +27658,12 @@ func (s *MaintenanceWindowRunCommandParameters) SetTimeoutSeconds(v int64) *Main // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options -// for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. +// for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // For Step Functions tasks, Systems Manager ignores any values specified for // TaskParameters and LoggingInfo. @@ -27705,21 +27712,21 @@ func (s *MaintenanceWindowStepFunctionsParameters) SetName(v string) *Maintenanc return s } -// The target registered with the Maintenance Window. +// The target registered with the maintenance window. 
type MaintenanceWindowTarget struct { _ struct{} `type:"structure"` // A description for the target. Description *string `min:"1" type:"string" sensitive:"true"` - // The target name. + // The name for the maintenance window target. Name *string `min:"3" type:"string"` // A user-provided value that will be included in any CloudWatch events that - // are raised while running tasks for these targets in this Maintenance Window. + // are raised while running tasks for these targets in this maintenance window. OwnerInformation *string `min:"1" type:"string" sensitive:"true"` - // The type of target that is being registered with the Maintenance Window. + // The type of target that is being registered with the maintenance window. ResourceType *string `type:"string" enum:"MaintenanceWindowResourceType"` // The targets, either instances or tags. @@ -27733,7 +27740,7 @@ type MaintenanceWindowTarget struct { // Key=,Values=. Targets []*Target `type:"list"` - // The ID of the Maintenance Window to register the target with. + // The ID of the maintenance window to register the target with. WindowId *string `min:"20" type:"string"` // The ID of the target. @@ -27792,7 +27799,7 @@ func (s *MaintenanceWindowTarget) SetWindowTargetId(v string) *MaintenanceWindow return s } -// Information about a task defined for a Maintenance Window. +// Information about a task defined for a maintenance window. type MaintenanceWindowTask struct { _ struct{} `type:"structure"` @@ -27804,7 +27811,7 @@ type MaintenanceWindowTask struct { // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. 
LoggingInfo *LoggingInfo `type:"structure"` // The maximum number of targets this task can be run for, in parallel. @@ -27816,12 +27823,13 @@ type MaintenanceWindowTask struct { // The task name. Name *string `min:"3" type:"string"` - // The priority of the task in the Maintenance Window. The lower the number, + // The priority of the task in the maintenance window. The lower the number, // the higher the priority. Tasks that have the same priority are scheduled // in parallel. Priority *int64 `type:"integer"` - // The role that should be assumed when running the task. + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for maintenance window Run Command tasks. ServiceRoleArn *string `type:"string"` // The targets (either instances or tags). Instances are specified using Key=instanceids,Values=,. @@ -27839,14 +27847,14 @@ type MaintenanceWindowTask struct { // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` // The type of task. The type can be one of the following: RUN_COMMAND, AUTOMATION, // LAMBDA, or STEP_FUNCTION. Type *string `type:"string" enum:"MaintenanceWindowTaskType"` - // The ID of the Maintenance Window where the task is registered. + // The ID of the maintenance window where the task is registered. WindowId *string `min:"20" type:"string"` // The task ID. 
@@ -28173,14 +28181,15 @@ func (s *NonCompliantSummary) SetSeveritySummary(v *SeveritySummary) *NonComplia type NotificationConfig struct { _ struct{} `type:"structure"` - // An Amazon Resource Name (ARN) for a Simple Notification Service (SNS) topic. - // Run Command pushes notifications about command status changes to this topic. + // An Amazon Resource Name (ARN) for an Amazon Simple Notification Service (Amazon + // SNS) topic. Run Command pushes notifications about command status changes + // to this topic. NotificationArn *string `type:"string"` // The different events for which you can receive notifications. These events // include the following: All (events), InProgress, Success, TimedOut, Cancelled, // Failed. To learn more about these events, see Configuring Amazon SNS Notifications - // for Run Command (http://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html) + // for AWS Systems Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/monitoring-sns-notifications.html) // in the AWS Systems Manager User Guide. NotificationEvents []*string `type:"list"` @@ -28376,6 +28385,9 @@ type ParameterHistory struct { Name *string `min:"1" type:"string"` // Information about the policies assigned to a parameter. + // + // Working with Parameter Policies (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-policies.html) + // in the AWS Systems Manager User Guide. Policies []*ParameterInlinePolicy `type:"list"` // The parameter tier. @@ -28519,7 +28531,7 @@ func (s *ParameterInlinePolicy) SetPolicyType(v string) *ParameterInlinePolicy { return s } -// Metada includes information like the ARN of the last user and the date/time +// Metadata includes information like the ARN of the last user and the date/time // the parameter was last used. 
type ParameterMetadata struct { _ struct{} `type:"structure"` @@ -30256,30 +30268,41 @@ type RegisterTargetWithMaintenanceWindowInput struct { Name *string `min:"3" type:"string"` // User-provided value that will be included in any CloudWatch events raised - // while running tasks for these targets in this Maintenance Window. + // while running tasks for these targets in this maintenance window. OwnerInformation *string `min:"1" type:"string" sensitive:"true"` - // The type of target being registered with the Maintenance Window. + // The type of target being registered with the maintenance window. // // ResourceType is a required field ResourceType *string `type:"string" required:"true" enum:"MaintenanceWindowResourceType"` - // The targets (either instances or tags). + // The targets to register with the maintenance window. In other words, the + // instances to run commands on when the maintenance window runs. // - // Specify instances using the following format: + // You can specify targets using either instance IDs or tags that have been + // applied to instances. // - // Key=InstanceIds,Values=, + // Example 1: Specify instance IDs // - // Specify tags using either of the following formats: + // Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3 // - // Key=tag:,Values=, + // Example 2: Use tag key-pairs applied to instances // - // Key=tag-key,Values=, + // Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2 + // + // Example 3: Use tag-keys applied to instances + // + // Key=tag-key,Values=my-tag-key-1,my-tag-key-2 + // + // For more information about these example formats, including the best use + // case for each one, see Examples: Register Targets with a Maintenance Window + // (https://docs.aws.amazon.com/systems-manager/latest/userguide/mw-cli-tutorial-targets-examples.html) + // in the AWS Systems Manager User Guide. 
// // Targets is a required field Targets []*Target `type:"list" required:"true"` - // The ID of the Maintenance Window the target should be registered with. + // The ID of the maintenance window the target should be registered with. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -30384,7 +30407,7 @@ func (s *RegisterTargetWithMaintenanceWindowInput) SetWindowId(v string) *Regist type RegisterTargetWithMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // The ID of the target definition in this Maintenance Window. + // The ID of the target definition in this maintenance window. WindowTargetId *string `min:"36" type:"string"` } @@ -30419,7 +30442,7 @@ type RegisterTaskWithMaintenanceWindowInput struct { // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. LoggingInfo *LoggingInfo `type:"structure"` // The maximum number of targets this task can be run for in parallel. @@ -30435,33 +30458,32 @@ type RegisterTaskWithMaintenanceWindowInput struct { // An optional name for the task. Name *string `min:"3" type:"string"` - // The priority of the task in the Maintenance Window, the lower the number - // the higher the priority. Tasks in a Maintenance Window are scheduled in priority + // The priority of the task in the maintenance window, the lower the number + // the higher the priority. Tasks in a maintenance window are scheduled in priority // order with tasks that have the same priority scheduled in parallel. Priority *int64 `type:"integer"` - // The role to assume when running the Maintenance Window task. 
+ // The ARN of the IAM service role for Systems Manager to assume when running + // a maintenance window task. If you do not specify a service role ARN, Systems + // Manager uses your account's service-linked role. If no service-linked role + // for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. // - // If you do not specify a service role ARN, Systems Manager will use your account's - // service-linked role for Systems Manager by default. If no service-linked - // role for Systems Manager exists in your account, it will be created when - // you run RegisterTaskWithMaintenanceWindow without specifying a service role - // ARN. + // For more information, see the following topics in the AWS Systems + // Manager User Guide: // - // For more information, see Service-Linked Role Permissions for Systems Manager - // (http://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) - // and Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance - // Window Tasks? (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) - // in the AWS Systems Manager User Guide. + // * Service-Linked Role Permissions for Systems Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) + // + // * Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance + // Window Tasks? (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) ServiceRoleArn *string `type:"string"` - // The targets (either instances or Maintenance Window targets). + // The targets (either instances or maintenance window targets). 
// // Specify instances using the following format: // // Key=InstanceIds,Values=, // - // Specify Maintenance Window targets using the following format: + // Specify maintenance window targets using the following format: // // Key=,Values=, // @@ -30482,7 +30504,7 @@ type RegisterTaskWithMaintenanceWindowInput struct { // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` // The type of task being registered. @@ -30490,7 +30512,7 @@ type RegisterTaskWithMaintenanceWindowInput struct { // TaskType is a required field TaskType *string `type:"string" required:"true" enum:"MaintenanceWindowTaskType"` - // The ID of the Maintenance Window the task should be added to. + // The ID of the maintenance window the task should be added to. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -30662,7 +30684,7 @@ func (s *RegisterTaskWithMaintenanceWindowInput) SetWindowId(v string) *Register type RegisterTaskWithMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // The ID of the task in the Maintenance Window. + // The ID of the task in the maintenance window. WindowTaskId *string `min:"36" type:"string"` } @@ -30697,7 +30719,7 @@ type RemoveTagsFromResourceInput struct { // For the Document and Parameter values, use the name of the resource. // // The ManagedInstance type for this API action is only for on-premises managed - // instances. You must specify the the name of the managed instance in the following + // instances. 
You must specify the name of the managed instance in the following // format: mi-ID_number. For example, mi-1a2b3c4d5e6f. // // ResourceId is a required field @@ -30706,7 +30728,7 @@ type RemoveTagsFromResourceInput struct { // The type of resource of which you want to remove a tag. // // The ManagedInstance type for this API action is only for on-premises managed - // instances. You must specify the the name of the managed instance in the following + // instances. You must specify the name of the managed instance in the following // format: mi-ID_number. For example, mi-1a2b3c4d5e6f. // // ResourceType is a required field @@ -31052,7 +31074,7 @@ type ResourceDataSyncS3Destination struct { _ struct{} `type:"structure"` // The ARN of an encryption key for a destination in Amazon S3. Must belong - // to the same region as the destination Amazon S3 bucket. + // to the same Region as the destination Amazon S3 bucket. AWSKMSKeyARN *string `min:"1" type:"string"` // The name of the Amazon S3 bucket where the aggregated data is stored. @@ -31366,18 +31388,18 @@ func (s *S3OutputUrl) SetOutputUrl(v string) *S3OutputUrl { return s } -// Information about a scheduled execution for a Maintenance Window. +// Information about a scheduled execution for a maintenance window. type ScheduledWindowExecution struct { _ struct{} `type:"structure"` - // The time, in ISO-8601 Extended format, that the Maintenance Window is scheduled + // The time, in ISO-8601 Extended format, that the maintenance window is scheduled // to be run. ExecutionTime *string `type:"string"` - // The name of the Maintenance Window to be run. + // The name of the maintenance window to be run. Name *string `min:"3" type:"string"` - // The ID of the Maintenance Window to be run. + // The ID of the maintenance window to be run. WindowId *string `min:"20" type:"string"` } @@ -31420,10 +31442,24 @@ type SendAutomationSignalInput struct { // The data sent with the signal. 
The data schema depends on the type of signal // used in the request. + // + // For Approve and Reject signal types, the payload is an optional comment that + // you can send with the signal type. For example: + // + // Comment="Looks good" + // + // For StartStep and Resume signal types, you must send the name of the Automation + // step to start or resume as the payload. For example: + // + // StepName="step1" + // + // For the StopStep signal type, you must send the step execution ID as the + // payload. For example: + // + // StepExecutionId="97fff367-fc5a-4299-aed8-0123456789ab" Payload map[string][]*string `min:"1" type:"map"` - // The type of signal. Valid signal types include the following: Approve and - // Reject + // The type of signal to send to an Automation execution. // // SignalType is a required field SignalType *string `type:"string" required:"true" enum:"SignalType"` @@ -31573,7 +31609,8 @@ type SendCommandInput struct { // The required and optional parameters specified in the document being run. Parameters map[string][]*string `type:"map"` - // The IAM role that Systems Manager uses to send notifications. + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for Run Command commands. ServiceRoleArn *string `type:"string"` // (Optional) An array of search criteria that targets instances using a Key,Value @@ -32877,8 +32914,8 @@ func (s StopAutomationExecutionOutput) GoString() string { // Metadata that you assign to your AWS resources. Tags enable you to categorize // your resources in different ways, for example, by purpose, owner, or environment. -// In Systems Manager, you can apply tags to documents, managed instances, Maintenance -// Windows, Parameter Store parameters, and patch baselines. +// In Systems Manager, you can apply tags to documents, managed instances, maintenance +// windows, Parameter Store parameters, and patch baselines. 
type Tag struct { _ struct{} `type:"structure"` @@ -33008,11 +33045,11 @@ type TargetLocation struct { // The AWS Regions targeted by the current Automation execution. Regions []*string `min:"1" type:"list"` - // The maxium number of AWS accounts and AWS regions allowed to run the Automation + // The maximum number of AWS accounts and AWS regions allowed to run the Automation // concurrently TargetLocationMaxConcurrency *string `min:"1" type:"string"` - // The maxium number of errors allowed before the system stops queueing additional + // The maximum number of errors allowed before the system stops queueing additional // Automation executions for the currently running Automation. TargetLocationMaxErrors *string `min:"1" type:"string"` } @@ -33693,29 +33730,29 @@ func (s *UpdateDocumentOutput) SetDocumentDescription(v *DocumentDescription) *U type UpdateMaintenanceWindowInput struct { _ struct{} `type:"structure"` - // Whether targets must be registered with the Maintenance Window before tasks + // Whether targets must be registered with the maintenance window before tasks // can be defined for those targets. AllowUnassociatedTargets *bool `type:"boolean"` - // The number of hours before the end of the Maintenance Window that Systems + // The number of hours before the end of the maintenance window that Systems // Manager stops scheduling new tasks for execution. Cutoff *int64 `type:"integer"` // An optional description for the update request. Description *string `min:"1" type:"string" sensitive:"true"` - // The duration of the Maintenance Window in hours. + // The duration of the maintenance window in hours. Duration *int64 `min:"1" type:"integer"` - // Whether the Maintenance Window is enabled. + // Whether the maintenance window is enabled. Enabled *bool `type:"boolean"` - // The date and time, in ISO-8601 Extended format, for when you want the Maintenance - // Window to become inactive. 
EndDate allows you to set a date and time in the - // future when the Maintenance Window will no longer run. + // The date and time, in ISO-8601 Extended format, for when you want the maintenance + // window to become inactive. EndDate allows you to set a date and time in the + // future when the maintenance window will no longer run. EndDate *string `type:"string"` - // The name of the Maintenance Window. + // The name of the maintenance window. Name *string `min:"3" type:"string"` // If True, then all fields that are required by the CreateMaintenanceWindow @@ -33723,22 +33760,22 @@ type UpdateMaintenanceWindowInput struct { // specified are set to null. Replace *bool `type:"boolean"` - // The schedule of the Maintenance Window in the form of a cron or rate expression. + // The schedule of the maintenance window in the form of a cron or rate expression. Schedule *string `min:"1" type:"string"` - // The time zone that the scheduled Maintenance Window executions are based + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database // (https://www.iana.org/time-zones) on the IANA website. ScheduleTimezone *string `type:"string"` - // The time zone that the scheduled Maintenance Window executions are based + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database // (https://www.iana.org/time-zones) on the IANA website. StartDate *string `type:"string"` - // The ID of the Maintenance Window to update. + // The ID of the maintenance window to update. 
// // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -33857,46 +33894,46 @@ func (s *UpdateMaintenanceWindowInput) SetWindowId(v string) *UpdateMaintenanceW type UpdateMaintenanceWindowOutput struct { _ struct{} `type:"structure"` - // Whether targets must be registered with the Maintenance Window before tasks + // Whether targets must be registered with the maintenance window before tasks // can be defined for those targets. AllowUnassociatedTargets *bool `type:"boolean"` - // The number of hours before the end of the Maintenance Window that Systems + // The number of hours before the end of the maintenance window that Systems // Manager stops scheduling new tasks for execution. Cutoff *int64 `type:"integer"` // An optional description of the update. Description *string `min:"1" type:"string" sensitive:"true"` - // The duration of the Maintenance Window in hours. + // The duration of the maintenance window in hours. Duration *int64 `min:"1" type:"integer"` - // Whether the Maintenance Window is enabled. + // Whether the maintenance window is enabled. Enabled *bool `type:"boolean"` - // The date and time, in ISO-8601 Extended format, for when the Maintenance - // Window is scheduled to become inactive. The Maintenance Window will not run + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become inactive. The maintenance window will not run // after this specified time. EndDate *string `type:"string"` - // The name of the Maintenance Window. + // The name of the maintenance window. Name *string `min:"3" type:"string"` - // The schedule of the Maintenance Window in the form of a cron or rate expression. + // The schedule of the maintenance window in the form of a cron or rate expression. 
Schedule *string `min:"1" type:"string"` - // The time zone that the scheduled Maintenance Window executions are based + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database // (https://www.iana.org/time-zones) on the IANA website. ScheduleTimezone *string `type:"string"` - // The date and time, in ISO-8601 Extended format, for when the Maintenance - // Window is scheduled to become active. The Maintenance Window will not run + // The date and time, in ISO-8601 Extended format, for when the maintenance + // window is scheduled to become active. The maintenance window will not run // before this specified time. StartDate *string `type:"string"` - // The ID of the created Maintenance Window. + // The ID of the created maintenance window. WindowId *string `min:"20" type:"string"` } @@ -33986,7 +34023,7 @@ type UpdateMaintenanceWindowTargetInput struct { Name *string `min:"3" type:"string"` // User-provided value that will be included in any CloudWatch events raised - // while running tasks for these targets in this Maintenance Window. + // while running tasks for these targets in this maintenance window. OwnerInformation *string `min:"1" type:"string" sensitive:"true"` // If True, then all fields that are required by the RegisterTargetWithMaintenanceWindow @@ -33997,7 +34034,7 @@ type UpdateMaintenanceWindowTargetInput struct { // The targets to add or replace. Targets []*Target `type:"list"` - // The Maintenance Window ID with which to modify the target. + // The maintenance window ID with which to modify the target. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -34116,7 +34153,7 @@ type UpdateMaintenanceWindowTargetOutput struct { // The updated targets. 
Targets []*Target `type:"list"` - // The Maintenance Window ID specified in the update request. + // The maintenance window ID specified in the update request. WindowId *string `min:"20" type:"string"` // The target ID specified in the update request. @@ -34180,7 +34217,7 @@ type UpdateMaintenanceWindowTaskInput struct { // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. LoggingInfo *LoggingInfo `type:"structure"` // The new MaxConcurrency value you want to specify. MaxConcurrency is the number @@ -34203,20 +34240,18 @@ type UpdateMaintenanceWindowTaskInput struct { // specified are set to null. Replace *bool `type:"boolean"` - // The IAM service role ARN to modify. The system assumes this role during task - // execution. + // The ARN of the IAM service role for Systems Manager to assume when running + // a maintenance window task. If you do not specify a service role ARN, Systems + // Manager uses your account's service-linked role. If no service-linked role + // for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. // - // If you do not specify a service role ARN, Systems Manager will use your account's - // service-linked role for Systems Manager by default. If no service-linked - // role for Systems Manager exists in your account, it will be created when - // you run RegisterTaskWithMaintenanceWindow without specifying a service role - // ARN. 
+ // For more information, see the following topics in the AWS Systems + // Manager User Guide: // - // For more information, see Service-Linked Role Permissions for Systems Manager - // (http://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) - // and Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance - // Window Tasks? (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) - // in the AWS Systems Manager User Guide. + // * Service-Linked Role Permissions for Systems Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) + // + // * Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance + // Window Tasks? (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) ServiceRoleArn *string `type:"string"` // The targets (either instances or tags) to modify. Instances are specified @@ -34236,7 +34271,7 @@ type UpdateMaintenanceWindowTaskInput struct { // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. // // The map has the following format: // @@ -34245,7 +34280,7 @@ type UpdateMaintenanceWindowTaskInput struct { // Value: an array of strings, each string is between 1 and 255 characters TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` - // The Maintenance Window ID that contains the task to modify. 
+ // The maintenance window ID that contains the task to modify. // // WindowId is a required field WindowId *string `min:"20" type:"string" required:"true"` @@ -34418,7 +34453,7 @@ type UpdateMaintenanceWindowTaskOutput struct { // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. LoggingInfo *LoggingInfo `type:"structure"` // The updated MaxConcurrency value. @@ -34433,7 +34468,8 @@ type UpdateMaintenanceWindowTaskOutput struct { // The updated priority value. Priority *int64 `type:"integer"` - // The updated service role ARN value. + // The ARN of the IAM service role to use to publish Amazon Simple Notification + // Service (Amazon SNS) notifications for maintenance window Run Command tasks. ServiceRoleArn *string `type:"string"` // The updated target values. @@ -34450,13 +34486,13 @@ type UpdateMaintenanceWindowTaskOutput struct { // TaskParameters has been deprecated. To specify parameters to pass to a task // when it runs, instead use the Parameters option in the TaskInvocationParameters // structure. For information about how Systems Manager handles these options - // for the supported Maintenance Window task types, see MaintenanceWindowTaskInvocationParameters. + // for the supported maintenance window task types, see MaintenanceWindowTaskInvocationParameters. TaskParameters map[string]*MaintenanceWindowTaskParameterValueExpression `type:"map" sensitive:"true"` - // The ID of the Maintenance Window that was updated. + // The ID of the maintenance window that was updated. 
WindowId *string `min:"20" type:"string"` - // The task ID of the Maintenance Window that was updated. + // The task ID of the maintenance window that was updated. WindowTaskId *string `min:"36" type:"string"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go index 6964adba01b..48d6d3ee3e3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go @@ -15,7 +15,7 @@ // (http://docs.aws.amazon.com/systems-manager/latest/userguide/). // // To get started, verify prerequisites and configure managed instances. For -// more information, see Systems Manager Prerequisites (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html) +// more information, see Setting Up AWS Systems Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html) // in the AWS Systems Manager User Guide. // // For information about other API actions you can perform on Amazon EC2 instances, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go index cf1f3eb0e3b..a96509501d1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go @@ -126,8 +126,8 @@ const ( // ErrCodeDoesNotExistException for service response error code // "DoesNotExistException". // - // Error returned when the ID specified for a resource, such as a Maintenance - // Window or Patch baseline, doesn't exist. + // Error returned when the ID specified for a resource, such as a maintenance + // window or Patch baseline, doesn't exist. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). 
@@ -272,7 +272,7 @@ const ( // ErrCodeInvalidDeletionIdException for service response error code // "InvalidDeletionIdException". // - // The ID specified for the delete operation does not exist or is not valide. + // The ID specified for the delete operation does not exist or is not valid. // Verify the ID and try again. ErrCodeInvalidDeletionIdException = "InvalidDeletionIdException" @@ -513,7 +513,7 @@ const ( // "InvocationDoesNotExist". // // The command ID and instance ID you specified did not match any invocations. - // Verify the command ID adn the instance ID and try again. + // Verify the command ID and the instance ID and try again. ErrCodeInvocationDoesNotExist = "InvocationDoesNotExist" // ErrCodeItemContentMismatchException for service response error code @@ -620,7 +620,7 @@ const ( // "ResourceLimitExceededException". // // Error returned when the caller has exceeded the default resource limits. - // For example, too many Maintenance Windows or Patch baselines have been created. + // For example, too many maintenance windows or patch baselines have been created. // // For information about resource limits in Systems Manager, see AWS Systems // Manager Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_ssm). @@ -683,6 +683,12 @@ const ( // ErrCodeUnsupportedFeatureRequiredException for service response error code // "UnsupportedFeatureRequiredException". + // + // Microsoft application patching is only available on EC2 instances and Advanced + // Instances. To patch Microsoft applications on on-premises servers and VMs, + // you must enable Advanced Instances. For more information, see Using the Advanced-Instances + // Tier (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html) + // in the AWS Systems Manager User Guide. 
ErrCodeUnsupportedFeatureRequiredException = "UnsupportedFeatureRequiredException" // ErrCodeUnsupportedInventoryItemContextException for service response error code diff --git a/vendor/github.com/bflad/tfproviderlint/cmd/tfproviderlint/tfproviderlint.go b/vendor/github.com/bflad/tfproviderlint/cmd/tfproviderlint/tfproviderlint.go new file mode 100644 index 00000000000..5d3b0163357 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/cmd/tfproviderlint/tfproviderlint.go @@ -0,0 +1,45 @@ +// The tfproviderlint command is a static checker for Terraform Providers. +// +// Each analyzer flag name is preceded by the analyzer name: -NAME.flag. +// In addition, the -NAME flag itself controls whether the +// diagnostics of that analyzer are displayed. (A disabled analyzer may yet +// be run if it is required by some other analyzer that is enabled.) +package main + +import ( + "golang.org/x/tools/go/analysis/multichecker" + + "github.com/bflad/tfproviderlint/passes/AT001" + "github.com/bflad/tfproviderlint/passes/AT002" + "github.com/bflad/tfproviderlint/passes/AT003" + "github.com/bflad/tfproviderlint/passes/AT004" + "github.com/bflad/tfproviderlint/passes/R001" + "github.com/bflad/tfproviderlint/passes/R002" + "github.com/bflad/tfproviderlint/passes/R003" + "github.com/bflad/tfproviderlint/passes/R004" + "github.com/bflad/tfproviderlint/passes/S001" + "github.com/bflad/tfproviderlint/passes/S002" + "github.com/bflad/tfproviderlint/passes/S003" + "github.com/bflad/tfproviderlint/passes/S004" + "github.com/bflad/tfproviderlint/passes/S005" + "github.com/bflad/tfproviderlint/passes/S006" +) + +func main() { + multichecker.Main( + AT001.Analyzer, + AT002.Analyzer, + AT003.Analyzer, + AT004.Analyzer, + R001.Analyzer, + R002.Analyzer, + R003.Analyzer, + R004.Analyzer, + S001.Analyzer, + S002.Analyzer, + S003.Analyzer, + S004.Analyzer, + S005.Analyzer, + S006.Analyzer, + ) +} diff --git 
a/vendor/github.com/bflad/tfproviderlint/passes/AT001/AT001.go b/vendor/github.com/bflad/tfproviderlint/passes/AT001/AT001.go new file mode 100644 index 00000000000..2a80481a07d --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/AT001/AT001.go @@ -0,0 +1,63 @@ +// Package AT001 defines an Analyzer that checks for +// TestCase missing CheckDestroy +package AT001 + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/acctestcase" + "github.com/bflad/tfproviderlint/passes/commentignore" +) + +const Doc = `check for TestCase missing CheckDestroy + +The AT001 analyzer reports likely incorrect uses of TestCase +which do not define a CheckDestroy function. CheckDestroy is used to verify +that test infrastructure has been removed at the end of an acceptance test. + +More information can be found at: +https://www.terraform.io/docs/extend/testing/acceptance-tests/testcase.html#checkdestroy` + +const analyzerName = "AT001" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + acctestcase.Analyzer, + commentignore.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + testCases := pass.ResultOf[acctestcase.Analyzer].([]*ast.CompositeLit) + for _, testCase := range testCases { + if ignorer.ShouldIgnore(analyzerName, testCase) { + continue + } + + var found bool + + for _, elt := range testCase.Elts { + switch v := elt.(type) { + default: + continue + case *ast.KeyValueExpr: + if v.Key.(*ast.Ident).Name == "CheckDestroy" { + found = true + break + } + } + } + + if !found { + pass.Reportf(testCase.Type.(*ast.SelectorExpr).Sel.Pos(), "%s: missing CheckDestroy", analyzerName) + } + } + + return nil, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/AT002/AT002.go 
b/vendor/github.com/bflad/tfproviderlint/passes/AT002/AT002.go new file mode 100644 index 00000000000..9f588f5e8f5 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/AT002/AT002.go @@ -0,0 +1,51 @@ +// Package AT002 defines an Analyzer that checks for +// acceptance test names including the word import +package AT002 + +import ( + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/acctestfunc" + "github.com/bflad/tfproviderlint/passes/commentignore" +) + +const Doc = `check for acceptance test function names including the word import + +The AT002 analyzer reports where the word import or Import is used +in an acceptance test function name, which generally means there is an extraneous +acceptance test. ImportState testing should be included as a TestStep with each +applicable acceptance test, rather than a separate test that only verifies import +of a single test configuration.` + +const analyzerName = "AT002" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + acctestfunc.Analyzer, + commentignore.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + testAccFuncs := pass.ResultOf[acctestfunc.Analyzer].([]*ast.FuncDecl) + for _, testAccFunc := range testAccFuncs { + if ignorer.ShouldIgnore(analyzerName, testAccFunc) { + continue + } + + funcName := testAccFunc.Name.Name + + if strings.Contains(funcName, "_import") || strings.Contains(funcName, "_Import") { + pass.Reportf(testAccFunc.Name.NamePos, "%s: acceptance test function name should not include import", analyzerName) + } + } + + return nil, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/AT003/AT003.go b/vendor/github.com/bflad/tfproviderlint/passes/AT003/AT003.go new file mode 100644 index 00000000000..680d49e76f1 --- /dev/null +++ 
b/vendor/github.com/bflad/tfproviderlint/passes/AT003/AT003.go @@ -0,0 +1,47 @@ +// Package AT003 defines an Analyzer that checks for +// acceptance test names missing an underscore +package AT003 + +import ( + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/acctestfunc" + "github.com/bflad/tfproviderlint/passes/commentignore" +) + +const Doc = `check for acceptance test function names missing an underscore + +The AT003 analyzer reports where an underscore is not +present in the function name, which could make per-resource testing harder to +execute in larger providers or those with overlapping resource names.` + +const analyzerName = "AT003" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + acctestfunc.Analyzer, + commentignore.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + testAccFuncs := pass.ResultOf[acctestfunc.Analyzer].([]*ast.FuncDecl) + for _, testAccFunc := range testAccFuncs { + if ignorer.ShouldIgnore(analyzerName, testAccFunc) { + continue + } + + if !strings.Contains(testAccFunc.Name.Name, "_") { + pass.Reportf(testAccFunc.Name.NamePos, "%s: acceptance test function name should include underscore", analyzerName) + } + } + + return nil, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/AT004/AT004.go b/vendor/github.com/bflad/tfproviderlint/passes/AT004/AT004.go new file mode 100644 index 00000000000..dee5310ac00 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/AT004/AT004.go @@ -0,0 +1,50 @@ +// Package AT004 defines an Analyzer that checks for +// TestStep Config containing provider configuration +package AT004 + +import ( + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + 
"golang.org/x/tools/go/ast/inspector" +) + +const Doc = `check for TestStep Config containing provider configuration + +The AT004 analyzer reports likely incorrect uses of TestStep +Config which define a provider configuration. Provider configurations should +be handled outside individual test configurations (e.g. environment variables).` + +const analyzerName = "AT004" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.BasicLit)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + x := n.(*ast.BasicLit) + + if x.Kind != token.STRING { + return + } + + if !strings.Contains(x.Value, `provider "`) { + return + } + + pass.Reportf(x.ValuePos, "%s: provider declaration should be omitted", analyzerName) + }) + return nil, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/R001/R001.go b/vendor/github.com/bflad/tfproviderlint/passes/R001/R001.go new file mode 100644 index 00000000000..bfb6f010866 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/R001/R001.go @@ -0,0 +1,52 @@ +// Package R001 defines an Analyzer that checks for +// ResourceData.Set() calls using complex key argument +package R001 + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/commentignore" + "github.com/bflad/tfproviderlint/passes/resourcedataset" +) + +const Doc = `check for ResourceData.Set() calls using complex key argument + +The R001 analyzer reports a complex key argument for a Set() +call. 
It is preferred to explicitly use a string literal as the key argument.` + +const analyzerName = "R001" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + resourcedataset.Analyzer, + commentignore.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + sets := pass.ResultOf[resourcedataset.Analyzer].([]*ast.CallExpr) + for _, set := range sets { + if ignorer.ShouldIgnore(analyzerName, set) { + continue + } + + if len(set.Args) < 2 { + continue + } + + switch v := set.Args[0].(type) { + default: + pass.Reportf(v.Pos(), "%s: ResourceData.Set() key argument should be string literal", analyzerName) + case *ast.BasicLit: + continue + } + } + + return nil, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/R002/R002.go b/vendor/github.com/bflad/tfproviderlint/passes/R002/R002.go new file mode 100644 index 00000000000..2e30626ea22 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/R002/R002.go @@ -0,0 +1,53 @@ +// Package R002 defines an Analyzer that checks for +// ResourceData.Set() calls using * dereferences +package R002 + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/commentignore" + "github.com/bflad/tfproviderlint/passes/resourcedataset" +) + +const Doc = `check for ResourceData.Set() calls using * dereferences + +The R002 analyzer reports likely extraneous uses of +star (*) dereferences for a Set() call. 
The Set() function automatically +handles pointers and * dereferences without nil checks can panic.` + +const analyzerName = "R002" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + resourcedataset.Analyzer, + commentignore.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + sets := pass.ResultOf[resourcedataset.Analyzer].([]*ast.CallExpr) + for _, set := range sets { + if ignorer.ShouldIgnore(analyzerName, set) { + continue + } + + if len(set.Args) < 2 { + continue + } + + switch v := set.Args[1].(type) { + default: + continue + case *ast.StarExpr: + pass.Reportf(v.Pos(), "%s: ResourceData.Set() pointer value dereference is extraneous", analyzerName) + } + } + + return nil, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/R003/R003.go b/vendor/github.com/bflad/tfproviderlint/passes/R003/R003.go new file mode 100644 index 00000000000..b8a0d8f8ff1 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/R003/R003.go @@ -0,0 +1,54 @@ +// Package R003 defines an Analyzer that checks for +// Resource having Exists functions +package R003 + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/commentignore" + "github.com/bflad/tfproviderlint/passes/schemaresource" +) + +const Doc = `check for Resource having Exists functions + +The R003 analyzer reports likely extraneous uses of Exists +functions for a resource. 
Exists logic can be handled inside the Read function +to prevent logic duplication.` + +const analyzerName = "R003" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + schemaresource.Analyzer, + commentignore.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + resources := pass.ResultOf[schemaresource.Analyzer].([]*ast.CompositeLit) + for _, resource := range resources { + if ignorer.ShouldIgnore(analyzerName, resource) { + continue + } + + for _, elt := range resource.Elts { + switch v := elt.(type) { + default: + continue + case *ast.KeyValueExpr: + if v.Key.(*ast.Ident).Name == "Exists" { + pass.Reportf(v.Key.Pos(), "%s: resource should not include Exists function", analyzerName) + break + } + } + } + } + + return nil, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/R004/R004.go b/vendor/github.com/bflad/tfproviderlint/passes/R004/R004.go new file mode 100644 index 00000000000..42abb784c88 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/R004/R004.go @@ -0,0 +1,116 @@ +// Package R004 defines an Analyzer that checks for +// ResourceData.Set() calls using incompatible value types +package R004 + +import ( + "go/ast" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/commentignore" + "github.com/bflad/tfproviderlint/passes/resourcedataset" +) + +const Doc = `check for ResourceData.Set() calls using incompatible value types + +The R004 analyzer reports incorrect types for a Set() call value. 
+The Set() function only supports a subset of basic types, slices and maps of that +subset of basic types, and the schema.Set type.` + +const analyzerName = "R004" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + resourcedataset.Analyzer, + commentignore.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + sets := pass.ResultOf[resourcedataset.Analyzer].([]*ast.CallExpr) + for _, set := range sets { + if ignorer.ShouldIgnore(analyzerName, set) { + continue + } + + if len(set.Args) < 2 { + continue + } + + pos := set.Args[1].Pos() + t := pass.TypesInfo.TypeOf(set.Args[1]).Underlying() + + if !isAllowedType(t) { + pass.Reportf(pos, "%s: ResourceData.Set() incompatible value type: %s", analyzerName, t.String()) + } + } + + return nil, nil +} + +func isAllowedType(t types.Type) bool { + switch t := t.(type) { + default: + return false + case *types.Basic: + if !isAllowedBasicType(t) { + return false + } + case *types.Interface: + return true + case *types.Map: + switch k := t.Key().Underlying().(type) { + default: + return false + case *types.Basic: + if k.Kind() != types.String { + return false + } + + return isAllowedType(t.Elem().Underlying()) + } + case *types.Named: + if t.Obj().Name() != "Set" { + return false + } + // HasSuffix here due to vendoring + if !strings.HasSuffix(t.Obj().Pkg().Path(), "github.com/hashicorp/terraform/helper/schema") { + return false + } + case *types.Pointer: + return isAllowedType(t.Elem()) + case *types.Slice: + return isAllowedType(t.Elem().Underlying()) + } + + return true +} + +var allowedBasicKindTypes = []types.BasicKind{ + types.Bool, + types.Float32, + types.Float64, + types.Int, + types.Int8, + types.Int16, + types.Int32, + types.Int64, + types.String, + types.UntypedNil, +} + +func isAllowedBasicType(b *types.Basic) bool { + for _, allowedBasicKindType 
:= range allowedBasicKindTypes { + if b.Kind() == allowedBasicKindType { + return true + } + } + + return false +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/S001/S001.go b/vendor/github.com/bflad/tfproviderlint/passes/S001/S001.go new file mode 100644 index 00000000000..9397ebf4985 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/S001/S001.go @@ -0,0 +1,89 @@ +// Package S001 defines an Analyzer that checks for +// Schema of TypeList or TypeSet missing Elem +package S001 + +import ( + "go/ast" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/commentignore" + "github.com/bflad/tfproviderlint/passes/schemaschema" +) + +const Doc = `check for Schema of TypeList or TypeSet missing Elem + +The S001 analyzer reports cases of TypeList or TypeSet schemas missing Elem, +which will fail schema validation.` + +const analyzerName = "S001" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + schemaschema.Analyzer, + commentignore.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + schemas := pass.ResultOf[schemaschema.Analyzer].([]*ast.CompositeLit) + for _, schema := range schemas { + if ignorer.ShouldIgnore(analyzerName, schema) { + continue + } + + var elemFound, typeListOrSet bool + + for _, elt := range schema.Elts { + switch v := elt.(type) { + default: + continue + case *ast.KeyValueExpr: + name := v.Key.(*ast.Ident).Name + + if name == "Elem" { + elemFound = true + continue + } + + if name != "Type" { + continue + } + + switch v := v.Value.(type) { + default: + continue + case *ast.SelectorExpr: + // Use AST over TypesInfo here as schema uses ValueType + if v.Sel.Name != "TypeList" && v.Sel.Name != "TypeSet" { + continue + } + + switch t := pass.TypesInfo.TypeOf(v).(type) { + default: + 
continue + case *types.Named: + // HasSuffix here due to vendoring + if !strings.HasSuffix(t.Obj().Pkg().Path(), "github.com/hashicorp/terraform/helper/schema") { + continue + } + + typeListOrSet = true + } + } + } + } + + if typeListOrSet && !elemFound { + pass.Reportf(schema.Type.(*ast.SelectorExpr).Sel.Pos(), "%s: schema of TypeList or TypeSet should include Elem", analyzerName) + } + } + + return nil, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/S002/S002.go b/vendor/github.com/bflad/tfproviderlint/passes/S002/S002.go new file mode 100644 index 00000000000..276224d1478 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/S002/S002.go @@ -0,0 +1,78 @@ +// Package S002 defines an Analyzer that checks for +// Schema with both Required and Optional enabled +package S002 + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/commentignore" + "github.com/bflad/tfproviderlint/passes/schemaschema" +) + +const Doc = `check for Schema with both Required and Optional enabled + +The S002 analyzer reports cases of schemas which enables both Required +and Optional, which will fail provider schema validation.` + +const analyzerName = "S002" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + schemaschema.Analyzer, + commentignore.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + schemas := pass.ResultOf[schemaschema.Analyzer].([]*ast.CompositeLit) + for _, schema := range schemas { + if ignorer.ShouldIgnore(analyzerName, schema) { + continue + } + + var optionalEnabled, requiredEnabled bool + + for _, elt := range schema.Elts { + switch v := elt.(type) { + default: + continue + case *ast.KeyValueExpr: + name := v.Key.(*ast.Ident).Name + + if name != "Optional" && name != "Required" { + continue + } 
+ + switch v := v.Value.(type) { + default: + continue + case *ast.Ident: + value := v.Name + + if value != "true" { + continue + } + + if name == "Optional" { + optionalEnabled = true + continue + } + + requiredEnabled = true + } + } + } + + if optionalEnabled && requiredEnabled { + pass.Reportf(schema.Type.(*ast.SelectorExpr).Sel.Pos(), "%s: schema should not enable Required and Optional", analyzerName) + } + } + + return nil, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/S003/S003.go b/vendor/github.com/bflad/tfproviderlint/passes/S003/S003.go new file mode 100644 index 00000000000..acf370c1342 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/S003/S003.go @@ -0,0 +1,78 @@ +// Package S003 defines an Analyzer that checks for +// Schema with both Required and Computed enabled +package S003 + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/commentignore" + "github.com/bflad/tfproviderlint/passes/schemaschema" +) + +const Doc = `check for Schema with both Required and Computed enabled + +The S003 analyzer reports cases of schemas which enables both Required +and Computed, which will fail provider schema validation.` + +const analyzerName = "S003" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + schemaschema.Analyzer, + commentignore.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + schemas := pass.ResultOf[schemaschema.Analyzer].([]*ast.CompositeLit) + for _, schema := range schemas { + if ignorer.ShouldIgnore(analyzerName, schema) { + continue + } + + var computedEnabled, requiredEnabled bool + + for _, elt := range schema.Elts { + switch v := elt.(type) { + default: + continue + case *ast.KeyValueExpr: + name := v.Key.(*ast.Ident).Name + + if name != "Computed" && name != 
"Required" { + continue + } + + switch v := v.Value.(type) { + default: + continue + case *ast.Ident: + value := v.Name + + if value != "true" { + continue + } + + if name == "Computed" { + computedEnabled = true + continue + } + + requiredEnabled = true + } + } + } + + if computedEnabled && requiredEnabled { + pass.Reportf(schema.Type.(*ast.SelectorExpr).Sel.Pos(), "%s: schema should not enable Required and Computed", analyzerName) + } + } + + return nil, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/S004/S004.go b/vendor/github.com/bflad/tfproviderlint/passes/S004/S004.go new file mode 100644 index 00000000000..42632d48e65 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/S004/S004.go @@ -0,0 +1,82 @@ +// Package S004 defines an Analyzer that checks for +// Schema with Required enabled and Default configured +package S004 + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/commentignore" + "github.com/bflad/tfproviderlint/passes/schemaschema" +) + +const Doc = `check for Schema with Required enabled and Default configured + +The S004 analyzer reports cases of schemas which enables Required +and configures Default, which will fail provider schema validation.` + +const analyzerName = "S004" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + schemaschema.Analyzer, + commentignore.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + schemas := pass.ResultOf[schemaschema.Analyzer].([]*ast.CompositeLit) + for _, schema := range schemas { + if ignorer.ShouldIgnore(analyzerName, schema) { + continue + } + + var defaultConfigured, requiredEnabled bool + + for _, elt := range schema.Elts { + switch v := elt.(type) { + default: + continue + case *ast.KeyValueExpr: + name := v.Key.(*ast.Ident).Name 
+ + if name != "Default" && name != "Required" { + continue + } + + switch v := v.Value.(type) { + default: + if name == "Default" { + defaultConfigured = true + } + + continue + case *ast.Ident: + value := v.Name + + if name == "Default" && value != "nil" { + defaultConfigured = true + continue + } + + if value != "true" { + continue + } + + requiredEnabled = true + } + } + } + + if defaultConfigured && requiredEnabled { + pass.Reportf(schema.Type.(*ast.SelectorExpr).Sel.Pos(), "%s: schema should not enable Required and configure Default", analyzerName) + } + } + + return nil, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/S005/S005.go b/vendor/github.com/bflad/tfproviderlint/passes/S005/S005.go new file mode 100644 index 00000000000..9da967752c6 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/S005/S005.go @@ -0,0 +1,82 @@ +// Package S005 defines an Analyzer that checks for +// Schema with Computed enabled and Default configured +package S005 + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/commentignore" + "github.com/bflad/tfproviderlint/passes/schemaschema" +) + +const Doc = `check for Schema with Computed enabled and Default configured + +The S005 analyzer reports cases of schemas which enables Computed +and configures Default, which will fail provider schema validation.` + +const analyzerName = "S005" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + schemaschema.Analyzer, + commentignore.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + schemas := pass.ResultOf[schemaschema.Analyzer].([]*ast.CompositeLit) + for _, schema := range schemas { + if ignorer.ShouldIgnore(analyzerName, schema) { + continue + } + + var computedEnabled, defaultConfigured bool + + for _, elt := range 
schema.Elts { + switch v := elt.(type) { + default: + continue + case *ast.KeyValueExpr: + name := v.Key.(*ast.Ident).Name + + if name != "Default" && name != "Computed" { + continue + } + + switch v := v.Value.(type) { + default: + if name == "Default" { + defaultConfigured = true + } + + continue + case *ast.Ident: + value := v.Name + + if name == "Default" && value != "nil" { + defaultConfigured = true + continue + } + + if value != "true" { + continue + } + + computedEnabled = true + } + } + } + + if computedEnabled && defaultConfigured { + pass.Reportf(schema.Type.(*ast.SelectorExpr).Sel.Pos(), "%s: schema should not enable Computed and configure Default", analyzerName) + } + } + + return nil, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/S006/S006.go b/vendor/github.com/bflad/tfproviderlint/passes/S006/S006.go new file mode 100644 index 00000000000..aacfff3368b --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/S006/S006.go @@ -0,0 +1,91 @@ +// Package S006 defines an Analyzer that checks for +// Schema of TypeMap missing Elem +package S006 + +import ( + "go/ast" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + + "github.com/bflad/tfproviderlint/passes/commentignore" + "github.com/bflad/tfproviderlint/passes/schemaschema" +) + +const Doc = `check for Schema of TypeMap missing Elem + +The S006 analyzer reports cases of TypeMap schemas missing Elem, +which currently passes Terraform schema validation, but breaks downstream tools +and may be required in the future.` + +const analyzerName = "S006" + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: Doc, + Requires: []*analysis.Analyzer{ + schemaschema.Analyzer, + commentignore.Analyzer, + }, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) + schemas := pass.ResultOf[schemaschema.Analyzer].([]*ast.CompositeLit) + for _, schema := 
range schemas { + if ignorer.ShouldIgnore(analyzerName, schema) { + continue + } + + var elemFound bool + var typeMap bool + + for _, elt := range schema.Elts { + switch v := elt.(type) { + default: + continue + case *ast.KeyValueExpr: + name := v.Key.(*ast.Ident).Name + + if name == "Elem" { + elemFound = true + continue + } + + if name != "Type" { + continue + } + + switch v := v.Value.(type) { + default: + continue + case *ast.SelectorExpr: + // Use AST over TypesInfo here as schema uses ValueType + if v.Sel.Name != "TypeMap" { + continue + } + + switch t := pass.TypesInfo.TypeOf(v).(type) { + default: + continue + case *types.Named: + // HasSuffix here due to vendoring + if !strings.HasSuffix(t.Obj().Pkg().Path(), "github.com/hashicorp/terraform/helper/schema") { + continue + } + + typeMap = true + } + } + } + } + + if typeMap && !elemFound { + pass.Reportf(schema.Type.(*ast.SelectorExpr).Sel.Pos(), "%s: schema of TypeMap should include Elem", analyzerName) + } + } + + return nil, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/acctestcase/acctestcase.go b/vendor/github.com/bflad/tfproviderlint/passes/acctestcase/acctestcase.go new file mode 100644 index 00000000000..b16c4820728 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/acctestcase/acctestcase.go @@ -0,0 +1,63 @@ +package acctestcase + +import ( + "go/ast" + "go/types" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "acctestcase", + Doc: "find github.com/hashicorp/terraform/helper/resource.TestCase literals for later passes", + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + Run: run, + ResultType: reflect.TypeOf([]*ast.CompositeLit{}), +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := 
[]ast.Node{ + (*ast.CompositeLit)(nil), + } + var result []*ast.CompositeLit + + inspect.Preorder(nodeFilter, func(n ast.Node) { + x := n.(*ast.CompositeLit) + + if !isResourceTestCase(pass, x) { + return + } + + result = append(result, x) + }) + + return result, nil +} + +func isResourceTestCase(pass *analysis.Pass, cl *ast.CompositeLit) bool { + switch v := cl.Type.(type) { + default: + return false + case *ast.SelectorExpr: + switch t := pass.TypesInfo.TypeOf(v).(type) { + default: + return false + case *types.Named: + if t.Obj().Name() != "TestCase" { + return false + } + // HasSuffix here due to vendoring + if !strings.HasSuffix(t.Obj().Pkg().Path(), "github.com/hashicorp/terraform/helper/resource") { + return false + } + } + } + return true +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/acctestfunc/acctestfunc.go b/vendor/github.com/bflad/tfproviderlint/passes/acctestfunc/acctestfunc.go new file mode 100644 index 00000000000..3ccc3f2ca26 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/acctestfunc/acctestfunc.go @@ -0,0 +1,41 @@ +package acctestfunc + +import ( + "go/ast" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "acctestfunc", + Doc: "find function names starting with TestAcc for later passes", + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + Run: run, + ResultType: reflect.TypeOf([]*ast.FuncDecl{}), +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + var result []*ast.FuncDecl + + inspect.Preorder(nodeFilter, func(n ast.Node) { + x := n.(*ast.FuncDecl) + + if !strings.HasPrefix(x.Name.Name, "TestAcc") { + return + } + + result = append(result, x) + }) + + return result, nil +} diff --git 
a/vendor/github.com/bflad/tfproviderlint/passes/commentignore/ignore.go b/vendor/github.com/bflad/tfproviderlint/passes/commentignore/ignore.go new file mode 100644 index 00000000000..ba4778d28a9 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/commentignore/ignore.go @@ -0,0 +1,60 @@ +package commentignore + +import ( + "go/ast" + "go/token" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" +) + +const commentIgnorePrefix = "lintignore:" + +var Analyzer = &analysis.Analyzer{ + Name: "commentignore", + Doc: "find ignore comments for later passes", + Run: run, + ResultType: reflect.TypeOf(new(Ignorer)), +} + +type ignore struct { + Pos token.Pos + End token.Pos +} + +type Ignorer struct { + ignores map[string][]ignore +} + +func (ignorer *Ignorer) ShouldIgnore(key string, n ast.Node) bool { + for _, ig := range ignorer.ignores[key] { + if ig.Pos <= n.Pos() && ig.End >= n.End() { + return true + } + } + + return false +} + +func run(pass *analysis.Pass) (interface{}, error) { + ignores := map[string][]ignore{} + for _, f := range pass.Files { + cmap := ast.NewCommentMap(pass.Fset, f, f.Comments) + for n, cgs := range cmap { + for _, cg := range cgs { + if strings.HasPrefix(cg.Text(), commentIgnorePrefix) { + key := strings.TrimPrefix(cg.Text(), commentIgnorePrefix) + key = strings.TrimSpace(key) + + // is it possible for nested pos/end to be outside the largest nodes? 
+ ignores[key] = append(ignores[key], ignore{n.Pos(), n.End()}) + } + } + } + } + + return &Ignorer{ + ignores: ignores, + }, nil +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/resourcedataset/resourcedataset.go b/vendor/github.com/bflad/tfproviderlint/passes/resourcedataset/resourcedataset.go new file mode 100644 index 00000000000..28d74f0055c --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/resourcedataset/resourcedataset.go @@ -0,0 +1,113 @@ +package resourcedataset + +import ( + "go/ast" + "go/types" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "resourcedataset", + Doc: "find github.com/hashicorp/terraform/helper/schema.ResourceData.Set() calls for later passes", + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + Run: run, + ResultType: reflect.TypeOf([]*ast.CallExpr{}), +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + var result []*ast.CallExpr + + inspect.Preorder(nodeFilter, func(n ast.Node) { + x := n.(*ast.CallExpr) + + if !isResourceDataSet(pass, x) { + return + } + + result = append(result, x) + }) + + return result, nil +} + +func isResourceDataSet(pass *analysis.Pass, ce *ast.CallExpr) bool { + switch f := ce.Fun.(type) { + default: + return false + case *ast.SelectorExpr: + if f.Sel.Name != "Set" { + return false + } + + switch x := f.X.(type) { + default: + return false + case *ast.Ident: + if x.Obj == nil { + return false + } + + switch decl := x.Obj.Decl.(type) { + default: + return false + case *ast.Field: + switch t := decl.Type.(type) { + default: + return false + case *ast.StarExpr: + switch t := pass.TypesInfo.TypeOf(t.X).(type) { + default: + return false + case *types.Named: + if 
!isSchemaResourceData(t) { + return false + } + } + case *ast.SelectorExpr: + switch t := pass.TypesInfo.TypeOf(t).(type) { + default: + return false + case *types.Named: + if !isSchemaResourceData(t) { + return false + } + } + } + case *ast.ValueSpec: + switch t := pass.TypesInfo.TypeOf(decl.Type).(type) { + default: + return false + case *types.Named: + if !isSchemaResourceData(t) { + return false + } + } + } + } + } + return true +} + +func isSchemaResourceData(t *types.Named) bool { + if t.Obj().Name() != "ResourceData" { + return false + } + + // HasSuffix here due to vendoring + if !strings.HasSuffix(t.Obj().Pkg().Path(), "github.com/hashicorp/terraform/helper/schema") { + return false + } + + return true +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/schemaresource/README.md b/vendor/github.com/bflad/tfproviderlint/passes/schemaresource/README.md new file mode 100644 index 00000000000..63b33438ff5 --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/schemaresource/README.md @@ -0,0 +1,9 @@ +# passes/schemaresource + +This pass only works with Terraform resources that are fully defined in a single function: + +```go +func someResourceFunc() *schema.Resource { + return &schema.Resource{ /* ... entire resource ... 
*/ } +} +``` diff --git a/vendor/github.com/bflad/tfproviderlint/passes/schemaresource/schemaresource.go b/vendor/github.com/bflad/tfproviderlint/passes/schemaresource/schemaresource.go new file mode 100644 index 00000000000..d341038898c --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/schemaresource/schemaresource.go @@ -0,0 +1,63 @@ +package schemaresource + +import ( + "go/ast" + "go/types" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "schemaresource", + Doc: "find github.com/hashicorp/terraform/helper/schema.Resource literals for later passes", + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + Run: run, + ResultType: reflect.TypeOf([]*ast.CompositeLit{}), +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CompositeLit)(nil), + } + var result []*ast.CompositeLit + + inspect.Preorder(nodeFilter, func(n ast.Node) { + x := n.(*ast.CompositeLit) + + if !isSchemaResource(pass, x) { + return + } + + result = append(result, x) + }) + + return result, nil +} + +func isSchemaResource(pass *analysis.Pass, cl *ast.CompositeLit) bool { + switch v := cl.Type.(type) { + default: + return false + case *ast.SelectorExpr: + switch t := pass.TypesInfo.TypeOf(v).(type) { + default: + return false + case *types.Named: + if t.Obj().Name() != "Resource" { + return false + } + // HasSuffix here due to vendoring + if !strings.HasSuffix(t.Obj().Pkg().Path(), "github.com/hashicorp/terraform/helper/schema") { + return false + } + } + } + return true +} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/schemaschema/README.md b/vendor/github.com/bflad/tfproviderlint/passes/schemaschema/README.md new file mode 100644 index 00000000000..18fdcb8a96d --- 
/dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/schemaschema/README.md @@ -0,0 +1,12 @@ +# passes/schemaschema + +This pass only works with Terraform schema that are fully defined: + +```go +&schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: /* ... */, +}, +``` diff --git a/vendor/github.com/bflad/tfproviderlint/passes/schemaschema/schemaschema.go b/vendor/github.com/bflad/tfproviderlint/passes/schemaschema/schemaschema.go new file mode 100644 index 00000000000..d193498a04b --- /dev/null +++ b/vendor/github.com/bflad/tfproviderlint/passes/schemaschema/schemaschema.go @@ -0,0 +1,63 @@ +package schemaschema + +import ( + "go/ast" + "go/types" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "schemaschema", + Doc: "find github.com/hashicorp/terraform/helper/schema.Schema literals for later passes", + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + Run: run, + ResultType: reflect.TypeOf([]*ast.CompositeLit{}), +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CompositeLit)(nil), + } + var result []*ast.CompositeLit + + inspect.Preorder(nodeFilter, func(n ast.Node) { + x := n.(*ast.CompositeLit) + + if !isSchemaSchema(pass, x) { + return + } + + result = append(result, x) + }) + + return result, nil +} + +func isSchemaSchema(pass *analysis.Pass, cl *ast.CompositeLit) bool { + switch v := cl.Type.(type) { + default: + return false + case *ast.SelectorExpr: + switch t := pass.TypesInfo.TypeOf(v).(type) { + default: + return false + case *types.Named: + if t.Obj().Name() != "Schema" { + return false + } + // HasSuffix here due to vendoring + if !strings.HasSuffix(t.Obj().Pkg().Path(), 
"github.com/hashicorp/terraform/helper/schema") { + return false + } + } + } + return true +} diff --git a/vendor/github.com/golang/mock/gomock/controller.go b/vendor/github.com/golang/mock/gomock/controller.go index 6fde25f5087..0651c91e444 100644 --- a/vendor/github.com/golang/mock/gomock/controller.go +++ b/vendor/github.com/golang/mock/gomock/controller.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// GoMock - a mock framework for Go. +// Package gomock is a mock framework for Go. // // Standard usage: // (1) Define an interface that you wish to mock. @@ -63,8 +63,8 @@ import ( "sync" ) -// A TestReporter is something that can be used to report test failures. -// It is satisfied by the standard library's *testing.T. +// A TestReporter is something that can be used to report test failures. It +// is satisfied by the standard library's *testing.T. type TestReporter interface { Errorf(format string, args ...interface{}) Fatalf(format string, args ...interface{}) @@ -77,14 +77,35 @@ type TestHelper interface { Helper() } -// A Controller represents the top-level control of a mock ecosystem. -// It defines the scope and lifetime of mock objects, as well as their expectations. -// It is safe to call Controller's methods from multiple goroutines. +// A Controller represents the top-level control of a mock ecosystem. It +// defines the scope and lifetime of mock objects, as well as their +// expectations. It is safe to call Controller's methods from multiple +// goroutines. Each test should create a new Controller and invoke Finish via +// defer. +// +// func TestFoo(t *testing.T) { +// ctrl := gomock.NewController(st) +// defer ctrl.Finish() +// // .. +// } +// +// func TestBar(t *testing.T) { +// t.Run("Sub-Test-1", st) { +// ctrl := gomock.NewController(st) +// defer ctrl.Finish() +// // .. 
+// }) +// t.Run("Sub-Test-2", st) { +// ctrl := gomock.NewController(st) +// defer ctrl.Finish() +// // .. +// }) +// }) type Controller struct { // T should only be called within a generated mock. It is not intended to // be used in user code and may be changed in future versions. T is the // TestReporter passed in when creating the Controller via NewController. - // If the TestReporter does not implment a TestHelper it will be wrapped + // If the TestReporter does not implement a TestHelper it will be wrapped // with a nopTestHelper. T TestHelper mu sync.Mutex @@ -92,6 +113,8 @@ type Controller struct { finished bool } +// NewController returns a new Controller. It is the preferred way to create a +// Controller. func NewController(t TestReporter) *Controller { h, ok := t.(TestHelper) if !ok { @@ -135,6 +158,7 @@ type nopTestHelper struct { func (h nopTestHelper) Helper() {} +// RecordCall is called by a mock. It should not be called by user code. func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call { ctrl.T.Helper() @@ -148,6 +172,7 @@ func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ... panic("unreachable") } +// RecordCallWithMethodType is called by a mock. It should not be called by user code. func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call { ctrl.T.Helper() @@ -160,6 +185,7 @@ func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method st return call } +// Call is called by a mock. It should not be called by user code. func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} { ctrl.T.Helper() @@ -200,6 +226,9 @@ func (ctrl *Controller) Call(receiver interface{}, method string, args ...interf return rets } +// Finish checks to see if all the methods that were expected to be called +// were called. 
It should be invoked for each Controller. It is not idempotent +// and therefore can only be invoked once. func (ctrl *Controller) Finish() { ctrl.T.Helper() diff --git a/vendor/github.com/golang/mock/gomock/matchers.go b/vendor/github.com/golang/mock/gomock/matchers.go index 189796f8656..fbff06062fa 100644 --- a/vendor/github.com/golang/mock/gomock/matchers.go +++ b/vendor/github.com/golang/mock/gomock/matchers.go @@ -98,9 +98,30 @@ func (m assignableToTypeOfMatcher) String() string { } // Constructors -func Any() Matcher { return anyMatcher{} } +// Any returns a matcher that always matches. +func Any() Matcher { return anyMatcher{} } + +// Eq returns a matcher that matches on equality. +// +// Example usage: +// Eq(5).Matches(5) // returns true +// Eq(5).Matches(4) // returns false func Eq(x interface{}) Matcher { return eqMatcher{x} } -func Nil() Matcher { return nilMatcher{} } + +// Nil returns a matcher that matches if the received value is nil. +// +// Example usage: +// var x *bytes.Buffer +// Nil().Matches(x) // returns true +// x = &bytes.Buffer{} +// Nil().Matches(x) // returns false +func Nil() Matcher { return nilMatcher{} } + +// Not reverses the results of its given child matcher. +// +// Example usage: +// Not(Eq(5)).Matches(4) // returns true +// Not(Eq(5)).Matches(5) // returns false func Not(x interface{}) Matcher { if m, ok := x.(Matcher); ok { return notMatcher{m} @@ -112,11 +133,9 @@ func Not(x interface{}) Matcher { // function is assignable to the type of the parameter to this function. // // Example usage: -// -// dbMock.EXPECT(). -// Insert(gomock.AssignableToTypeOf(&EmployeeRecord{})). 
-// Return(errors.New("DB error")) -// +// var s fmt.Stringer = &bytes.Buffer{} +// AssignableToTypeOf(s).Matches(time.Second) // returns true +// AssignableToTypeOf(s).Matches(99) // returns false func AssignableToTypeOf(x interface{}) Matcher { return assignableToTypeOfMatcher{reflect.TypeOf(x)} } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go index bddbd4e59d6..1f704ad546c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go @@ -168,7 +168,9 @@ func stringifyLoadMode(mode packages.LoadMode) string { return "load types" case packages.LoadSyntax: return "load types and syntax" - case packages.LoadAllSyntax: + } + // it may be an alias, and may be not + if mode == packages.LoadAllSyntax { return "load deps types and syntax" } return "unknown" diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 7e215f22029..2133562b01c 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -29,26 +29,17 @@ package cmp import ( "fmt" "reflect" + "strings" "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/function" "github.com/google/go-cmp/cmp/internal/value" ) -// BUG(dsnet): Maps with keys containing NaN values cannot be properly compared due to -// the reflection package's inability to retrieve such entries. Equal will panic -// anytime it comes across a NaN key, but this behavior may change. -// -// See https://golang.org/issue/11104 for more details. 
- -var nothing = reflect.Value{} - // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // -// • If two values are not of the same type, then they are never equal -// and the overall result is false. -// // • Let S be the set of all Ignore, Transformer, and Comparer options that // remain after applying all path filters, value filters, and type filters. // If at least one Ignore exists in S, then the comparison is ignored. @@ -61,43 +52,79 @@ var nothing = reflect.Value{} // // • If the values have an Equal method of the form "(T) Equal(T) bool" or // "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. -// Otherwise, no such method exists and evaluation proceeds to the next rule. +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. // // • Lastly, try to compare x and y based on their basic kinds. // Simple kinds like booleans, integers, floats, complex numbers, strings, and // channels are compared using the equivalent of the == operator in Go. // Functions are only equal if they are both nil, otherwise they are unequal. -// Pointers are equal if the underlying values they point to are also equal. -// Interfaces are equal if their underlying concrete values are also equal. // -// Structs are equal if all of their fields are equal. If a struct contains -// unexported fields, Equal panics unless the AllowUnexported option is used or -// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field. +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an Ignore option +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported +// option explicitly permits comparing the unexported field. 
+// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using cmpopts.EquateEmpty. // -// Arrays, slices, and maps are equal if they are both nil or both non-nil -// with the same length and the elements at each index or key are equal. -// Note that a non-nil empty slice and a nil slice are not equal. -// To equate empty slices and maps, consider using cmpopts.EquateEmpty. +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. // Map keys are equal according to the == operator. // To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using cmpopts.EquateEmpty. +// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. func Equal(x, y interface{}, opts ...Option) bool { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = reflect.TypeOf((*interface{})(nil)).Elem() + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + s := newState(opts) - s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y)) + s.compareAny(&pathStep{t, vx, vy}) return s.result.Equal() } // Diff returns a human-readable report of the differences between two values. 
// It returns an empty string if and only if Equal returns true for the same -// input values and options. The output string will use the "-" symbol to -// indicate elements removed from x, and the "+" symbol to indicate elements -// added to y. +// input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. // -// Do not depend on this output being stable. +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. func Diff(x, y interface{}, opts ...Option) string { r := new(defaultReporter) - opts = Options{Options(opts), r} - eq := Equal(x, y, opts...) + eq := Equal(x, y, Options(opts), Reporter(r)) d := r.String() if (d == "") != eq { panic("inconsistent difference and equality results") @@ -108,9 +135,13 @@ func Diff(x, y interface{}, opts ...Option) string { type state struct { // These fields represent the "comparison state". // Calling statelessCompare must not result in observable changes to these. - result diff.Result // The current result of comparison - curPath Path // The current path in the value tree - reporter reporter // Optional reporter used for difference formatting + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. 
+ recChecker recChecker // dynChecker triggers pseudo-random checks for option correctness. // It is safe for statelessCompare to mutate this value. @@ -122,10 +153,9 @@ type state struct { } func newState(opts []Option) *state { - s := new(state) - for _, opt := range opts { - s.processOption(opt) - } + // Always ensure a validator option exists to validate the inputs. + s := &state{opts: Options{validator{}}} + s.processOption(Options(opts)) return s } @@ -152,10 +182,7 @@ func (s *state) processOption(opt Option) { s.exporters[t] = true } case reporter: - if s.reporter != nil { - panic("difference reporter already registered") - } - s.reporter = opt + s.reporters = append(s.reporters, opt) default: panic(fmt.Sprintf("unknown option %T", opt)) } @@ -164,153 +191,88 @@ func (s *state) processOption(opt Option) { // statelessCompare compares two values and returns the result. // This function is stateless in that it does not alter the current result, // or output to any registered reporters. -func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result { +func (s *state) statelessCompare(step PathStep) diff.Result { // We do not save and restore the curPath because all of the compareX // methods should properly push and pop from the path. // It is an implementation bug if the contents of curPath differs from // when calling this function to when returning from it. - oldResult, oldReporter := s.result, s.reporter + oldResult, oldReporters := s.result, s.reporters s.result = diff.Result{} // Reset result - s.reporter = nil // Remove reporter to avoid spurious printouts - s.compareAny(vx, vy) + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) res := s.result - s.result, s.reporter = oldResult, oldReporter + s.result, s.reporters = oldResult, oldReporters return res } -func (s *state) compareAny(vx, vy reflect.Value) { - // TODO: Support cyclic data structures. - - // Rule 0: Differing types are never equal. 
- if !vx.IsValid() || !vy.IsValid() { - s.report(vx.IsValid() == vy.IsValid(), vx, vy) - return - } - if vx.Type() != vy.Type() { - s.report(false, vx, vy) // Possible for path to be empty - return - } - t := vx.Type() - if len(s.curPath) == 0 { - s.curPath.push(&pathStep{typ: t}) - defer s.curPath.pop() +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() } - vx, vy = s.tryExporting(vx, vy) + s.recChecker.Check(s.curPath) + + // Obtain the current type and values. + t := step.Type() + vx, vy := step.Values() // Rule 1: Check whether an option applies on this node in the value tree. - if s.tryOptions(vx, vy, t) { + if s.tryOptions(t, vx, vy) { return } // Rule 2: Check whether the type has a valid Equal method. - if s.tryMethod(vx, vy, t) { + if s.tryMethod(t, vx, vy) { return } - // Rule 3: Recursively descend into each value's underlying kind. + // Rule 3: Compare based on the underlying kind. 
switch t.Kind() { case reflect.Bool: - s.report(vx.Bool() == vy.Bool(), vx, vy) - return + s.report(vx.Bool() == vy.Bool(), 0) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - s.report(vx.Int() == vy.Int(), vx, vy) - return + s.report(vx.Int() == vy.Int(), 0) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - s.report(vx.Uint() == vy.Uint(), vx, vy) - return + s.report(vx.Uint() == vy.Uint(), 0) case reflect.Float32, reflect.Float64: - s.report(vx.Float() == vy.Float(), vx, vy) - return + s.report(vx.Float() == vy.Float(), 0) case reflect.Complex64, reflect.Complex128: - s.report(vx.Complex() == vy.Complex(), vx, vy) - return + s.report(vx.Complex() == vy.Complex(), 0) case reflect.String: - s.report(vx.String() == vy.String(), vx, vy) - return + s.report(vx.String() == vy.String(), 0) case reflect.Chan, reflect.UnsafePointer: - s.report(vx.Pointer() == vy.Pointer(), vx, vy) - return + s.report(vx.Pointer() == vy.Pointer(), 0) case reflect.Func: - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) case reflect.Ptr: - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - } - s.curPath.push(&indirect{pathStep{t.Elem()}}) - defer s.curPath.pop() - s.compareAny(vx.Elem(), vy.Elem()) - return + s.comparePtr(t, vx, vy) case reflect.Interface: - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return - } - if vx.Elem().Type() != vy.Elem().Type() { - s.report(false, vx.Elem(), vy.Elem()) - return - } - s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}}) - defer s.curPath.pop() - s.compareAny(vx.Elem(), vy.Elem()) - return - case reflect.Slice: - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) - return 
- } - fallthrough - case reflect.Array: - s.compareArray(vx, vy, t) - return - case reflect.Map: - s.compareMap(vx, vy, t) - return - case reflect.Struct: - s.compareStruct(vx, vy, t) - return + s.compareInterface(t, vx, vy) default: panic(fmt.Sprintf("%v kind not handled", t.Kind())) } } -func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) { - if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported { - if sf.force { - // Use unsafe pointer arithmetic to get read-write access to an - // unexported field in the struct. - vx = unsafeRetrieveField(sf.pvx, sf.field) - vy = unsafeRetrieveField(sf.pvy, sf.field) - } else { - // We are not allowed to export the value, so invalidate them - // so that tryOptions can panic later if not explicitly ignored. - vx = nothing - vy = nothing - } - } - return vx, vy -} - -func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool { - // If there were no FilterValues, we will not detect invalid inputs, - // so manually check for them and append invalid if necessary. - // We still evaluate the options since an ignore can override invalid. - opts := s.opts - if !vx.IsValid() || !vy.IsValid() { - opts = Options{opts, invalid{}} - } - +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { // Evaluate all filters and apply the remaining options. - if opt := opts.filter(s, vx, vy, t); opt != nil { + if opt := s.opts.filter(s, t, vx, vy); opt != nil { opt.apply(s, vx, vy) return true } return false } -func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool { +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { // Check if this type even has an Equal method. 
m, ok := t.MethodByName("Equal") if !ok || !function.IsType(m.Type, function.EqualAssignable) { @@ -318,11 +280,11 @@ func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool { } eq := s.callTTBFunc(m.Func, vx, vy) - s.report(eq, vx, vy) + s.report(eq, reportByMethod) return true } -func (s *state) callTRFunc(f, v reflect.Value) reflect.Value { +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { v = sanitizeValue(v, f.Type().In(0)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{v})[0] @@ -333,15 +295,15 @@ func (s *state) callTRFunc(f, v reflect.Value) reflect.Value { // unsafe mutations to the input. c := make(chan reflect.Value) go detectRaces(c, f, v) + got := <-c want := f.Call([]reflect.Value{v})[0] - if got := <-c; !s.statelessCompare(got, want).Equal() { + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { // To avoid false-positives with non-reflexive equality operations, // we sanity check whether a value is equal to itself. - if !s.statelessCompare(want, want).Equal() { + if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { return want } - fn := getFuncName(f.Pointer()) - panic(fmt.Sprintf("non-deterministic function detected: %s", fn)) + panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) } return want } @@ -359,10 +321,10 @@ func (s *state) callTTBFunc(f, x, y reflect.Value) bool { // unsafe mutations to the input. 
c := make(chan reflect.Value) go detectRaces(c, f, y, x) + got := <-c want := f.Call([]reflect.Value{x, y})[0].Bool() - if got := <-c; !got.IsValid() || got.Bool() != want { - fn := getFuncName(f.Pointer()) - panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn)) + if !got.IsValid() || got.Bool() != want { + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) } return want } @@ -380,140 +342,241 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { // assuming that T is assignable to R. // Otherwise, it returns the input value as is. func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(dsnet): Remove this hacky workaround. - // See https://golang.org/issue/22143 - if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { - return reflect.New(t).Elem() + // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + if !flags.AtLeastGo110 { + if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { + return reflect.New(t).Elem() + } } return v } -func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) { - step := &sliceIndex{pathStep{t.Elem()}, 0, 0} - s.curPath.push(step) +func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var vax, vay reflect.Value // Addressable versions of vx and vy - // Compute an edit-script for slices vx and vy. 
- es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { - step.xkey, step.ykey = ix, iy - return s.statelessCompare(vx.Index(ix), vy.Index(iy)) - }) + step := StructField{&structField{}} + for i := 0; i < t.NumField(); i++ { + step.typ = t.Field(i).Type + step.vx = vx.Field(i) + step.vy = vy.Field(i) + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + if step.name == "_" { + continue + } + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For retrieveUnexportedField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. + vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + step.mayForce = s.exporters[t] + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(step) + } +} - // Report the entire slice as is if the arrays are of primitive kind, - // and the arrays are different enough. - isPrimitive := false - switch t.Elem().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: - isPrimitive = true - } - if isPrimitive && es.Dist() > (vx.Len()+vy.Len())/4 { - s.curPath.pop() // Pop first since we are reporting the whole slice - s.report(false, vx, vy) +func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { + isSlice := t.Kind() == reflect.Slice + if isSlice && (vx.IsNil() || vy.IsNil()) { + s.report(vx.IsNil() && vy.IsNil(), 0) return } - // Replay the edit-script. + // TODO: Support cyclic data structures. 
+ + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}} + withIndexes := func(ix, iy int) SliceIndex { + if ix >= 0 { + step.vx, step.xkey = vx.Index(ix), ix + } else { + step.vx, step.xkey = reflect.Value{}, -1 + } + if iy >= 0 { + step.vy, step.ykey = vy.Index(iy), iy + } else { + step.vy, step.ykey = reflect.Value{}, -1 + } + return step + } + + // Ignore options are able to ignore missing elements in a slice. + // However, detecting these reliably requires an optimal differencing + // algorithm, for which diff.Difference is not. + // + // Instead, we first iterate through both slices to detect which elements + // would be ignored if standing alone. The index of non-discarded elements + // are stored in a separate slice, which diffing is then performed on. + var indexesX, indexesY []int + var ignoredX, ignoredY []bool + for ix := 0; ix < vx.Len(); ix++ { + ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 + if !ignored { + indexesX = append(indexesX, ix) + } + ignoredX = append(ignoredX, ignored) + } + for iy := 0; iy < vy.Len(); iy++ { + ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 + if !ignored { + indexesY = append(indexesY, iy) + } + ignoredY = append(ignoredY, ignored) + } + + // Compute an edit-script for slices vx and vy (excluding ignored elements). + edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { + return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) + }) + + // Replay the ignore-scripts and the edit-script. 
var ix, iy int - for _, e := range es { + for ix < vx.Len() || iy < vy.Len() { + var e diff.EditType + switch { + case ix < len(ignoredX) && ignoredX[ix]: + e = diff.UniqueX + case iy < len(ignoredY) && ignoredY[iy]: + e = diff.UniqueY + default: + e, edits = edits[0], edits[1:] + } switch e { case diff.UniqueX: - step.xkey, step.ykey = ix, -1 - s.report(false, vx.Index(ix), nothing) + s.compareAny(withIndexes(ix, -1)) ix++ case diff.UniqueY: - step.xkey, step.ykey = -1, iy - s.report(false, nothing, vy.Index(iy)) + s.compareAny(withIndexes(-1, iy)) iy++ default: - step.xkey, step.ykey = ix, iy - if e == diff.Identity { - s.report(true, vx.Index(ix), vy.Index(iy)) - } else { - s.compareAny(vx.Index(ix), vy.Index(iy)) - } + s.compareAny(withIndexes(ix, iy)) ix++ iy++ } } - s.curPath.pop() - return } -func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) { +func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), vx, vy) + s.report(vx.IsNil() && vy.IsNil(), 0) return } + // TODO: Support cyclic data structures. + // We combine and sort the two map keys so that we can perform the // comparisons in a deterministic order. - step := &mapIndex{pathStep: pathStep{t.Elem()}} - s.curPath.push(step) - defer s.curPath.pop() + step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.vx = vx.MapIndex(k) + step.vy = vy.MapIndex(k) step.key = k - vvx := vx.MapIndex(k) - vvy := vy.MapIndex(k) - switch { - case vvx.IsValid() && vvy.IsValid(): - s.compareAny(vvx, vvy) - case vvx.IsValid() && !vvy.IsValid(): - s.report(false, vvx, nothing) - case !vvx.IsValid() && vvy.IsValid(): - s.report(false, nothing, vvy) - default: - // It is possible for both vvx and vvy to be invalid if the - // key contained a NaN value in it. There is no way in - // reflection to be able to retrieve these values. 
- // See https://golang.org/issue/11104 - panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath)) + if !step.vx.IsValid() && !step.vy.IsValid() { + // It is possible for both vx and vy to be invalid if the + // key contained a NaN value in it. + // + // Even with the ability to retrieve NaN keys in Go 1.12, + // there still isn't a sensible way to compare the values since + // a NaN key may map to multiple unordered values. + // The most reasonable way to compare NaNs would be to compare the + // set of values. However, this is impossible to do efficiently + // since set equality is provably an O(n^2) operation given only + // an Equal function. If we had a Less function or Hash function, + // this could be done in O(n*log(n)) or O(n), respectively. + // + // Rather than adding complex logic to deal with NaNs, make it + // the user's responsibility to compare such obscure maps. + const help = "consider providing a Comparer to compare the map" + panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) } + s.compareAny(step) } } -func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) { - var vax, vay reflect.Value // Addressable versions of vx and vy +func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } - step := &structField{} - s.curPath.push(step) - defer s.curPath.pop() - for i := 0; i < t.NumField(); i++ { - vvx := vx.Field(i) - vvy := vy.Field(i) - step.typ = t.Field(i).Type - step.name = t.Field(i).Name - step.idx = i - step.unexported = !isExported(step.name) - if step.unexported { - // Defer checking of unexported fields until later to give an - // Ignore a chance to ignore the field. - if !vax.IsValid() || !vay.IsValid() { - // For unsafeRetrieveField to work, the parent struct must - // be addressable. Create a new copy of the values if - // necessary to make them addressable. 
- vax = makeAddressable(vx) - vay = makeAddressable(vy) - } - step.force = s.exporters[t] - step.pvx = vax - step.pvy = vay - step.field = t.Field(i) + // TODO: Support cyclic data structures. + + vx, vy = vx.Elem(), vy.Elem() + s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) +} + +func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + vx, vy = vx.Elem(), vy.Elem() + if vx.Type() != vy.Type() { + s.report(false, 0) + return + } + s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) +} + +func (s *state) report(eq bool, rf resultFlags) { + if rf&reportByIgnore == 0 { + if eq { + s.result.NumSame++ + rf |= reportEqual + } else { + s.result.NumDiff++ + rf |= reportUnequal } - s.compareAny(vvx, vvy) + } + for _, r := range s.reporters { + r.Report(Result{flags: rf}) } } -// report records the result of a single comparison. -// It also calls Report if any reporter is registered. -func (s *state) report(eq bool, vx, vy reflect.Value) { - if eq { - s.result.NSame++ - } else { - s.result.NDiff++ +// recChecker tracks the state needed to periodically perform checks that +// user provided transformers are not stuck in an infinitely recursive cycle. +type recChecker struct{ next int } + +// Check scans the Path for any recursive transformers and panics when any +// recursive transformers are detected. Note that the presence of a +// recursive Transformer does not necessarily imply an infinite cycle. +// As such, this check only activates after some minimal number of path steps. +func (rc *recChecker) Check(p Path) { + const minLen = 1 << 16 + if rc.next == 0 { + rc.next = minLen + } + if len(p) < rc.next { + return + } + rc.next <<= 1 + + // Check whether the same transformer has appeared at least twice. 
+ var ss []string + m := map[Option]int{} + for _, ps := range p { + if t, ok := ps.(Transform); ok { + t := t.Option() + if m[t] == 1 { // Transformer was used exactly once before + tf := t.(*transformer).fnc.Type() + ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) + } + m[t]++ + } } - if s.reporter != nil { - s.reporter.Report(vx, vy, eq, s.curPath) + if len(ss) > 0 { + const warning = "recursive set of Transformers detected" + const help = "consider using cmpopts.AcyclicTransformer" + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) } } diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go similarity index 60% rename from vendor/github.com/google/go-cmp/cmp/unsafe_panic.go rename to vendor/github.com/google/go-cmp/cmp/export_panic.go index d1518eb3a8c..abc3a1c3e76 100644 --- a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. 
-// +build purego appengine js +// +build purego package cmp @@ -10,6 +10,6 @@ import "reflect" const supportAllowUnexported = false -func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value { - panic("unsafeRetrieveField is not implemented") +func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { + panic("retrieveUnexportedField is not implemented") } diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go similarity index 64% rename from vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go rename to vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 579b65507f6..59d4ee91b47 100644 --- a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. -// +build !purego,!appengine,!js +// +build !purego package cmp @@ -13,11 +13,11 @@ import ( const supportAllowUnexported = true -// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct -// such that the value has read-write permissions. +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. // // The parent struct, v, must be addressable, while f must be a StructField // describing the field to retrieve. 
-func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value { +func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go index 42afa4960ef..fe98dcc6774 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. -// +build !debug +// +build !cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go index fd9f7f17739..597b6ae56b1 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. -// +build debug +// +build cmp_debug package diff @@ -14,7 +14,7 @@ import ( ) // The algorithm can be seen running in real-time by enabling debugging: -// go test -tags=debug -v +// go test -tags=cmp_debug -v // // Example output: // === RUN TestDifference/#34 diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index 260befea2fd..3d2e42662ca 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -85,22 +85,31 @@ func (es EditScript) LenY() int { return len(es) - es.stats().NX } type EqualFunc func(ix int, iy int) Result // Result is the result of comparison. 
-// NSame is the number of sub-elements that are equal. -// NDiff is the number of sub-elements that are not equal. -type Result struct{ NSame, NDiff int } +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} // Equal indicates whether the symbols are equal. Two symbols are equal -// if and only if NDiff == 0. If Equal, then they are also Similar. -func (r Result) Equal() bool { return r.NDiff == 0 } +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } // Similar indicates whether two symbols are similar and may be represented // by using the Modified type. As a special case, we consider binary comparisons // (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. // -// The exact ratio of NSame to NDiff to determine similarity may change. +// The exact ratio of NumSame to NumDiff to determine similarity may change. func (r Result) Similar() bool { - // Use NSame+1 to offset NSame so that binary comparisons are similar. - return r.NSame+1 >= r.NDiff + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff } // Difference reports whether two lists of lengths nx and ny are equal @@ -191,9 +200,9 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // that two lists commonly differ because elements were added to the front // or end of the other list. // - // Running the tests with the "debug" build tag prints a visualization of - // the algorithm running in real-time. This is educational for understanding - // how the algorithm works. See debug_enable.go. 
+ // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) for { // Forward search from the beginning. diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 00000000000..a9e7fc0b5b3 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go new file mode 100644 index 00000000000..01aed0a1532 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go new file mode 100644 index 00000000000..c0b667f58b0 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go index 4c35ff11ee1..ace1dbe86e5 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -2,25 +2,34 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE.md file. -// Package function identifies function types. +// Package function provides functionality for identifying function types. package function -import "reflect" +import ( + "reflect" + "regexp" + "runtime" + "strings" +) type funcType int const ( _ funcType = iota + tbFunc // func(T) bool ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool tibFunc // func(T, I) bool trFunc // func(T) R - Equal = ttbFunc // func(T, T) bool - EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool - Transformer = trFunc // func(T) R - ValueFilter = ttbFunc // func(T, T) bool - Less = ttbFunc // func(T, T) bool + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool ) var boolType = reflect.TypeOf(true) @@ -32,10 +41,18 @@ func IsType(t reflect.Type, ft funcType) bool { } ni, no := t.NumIn(), t.NumOut() switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } case ttbFunc: // func(T, T) bool if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { 
return true } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } case tibFunc: // func(T, I) bool if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { return true @@ -47,3 +64,36 @@ func IsType(t reflect.Type, ft funcType) bool { } return false } + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. +func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go deleted file mode 100644 index 657e508779d..00000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// Package value provides functionality for reflect.Value types. -package value - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "unicode" -) - -var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() - -// Format formats the value v as a string. 
-// -// This is similar to fmt.Sprintf("%+v", v) except this: -// * Prints the type unless it can be elided -// * Avoids printing struct fields that are zero -// * Prints a nil-slice as being nil, not empty -// * Prints map entries in deterministic order -func Format(v reflect.Value, conf FormatConfig) string { - conf.printType = true - conf.followPointers = true - conf.realPointers = true - return formatAny(v, conf, nil) -} - -type FormatConfig struct { - UseStringer bool // Should the String method be used if available? - printType bool // Should we print the type before the value? - PrintPrimitiveType bool // Should we print the type of primitives? - followPointers bool // Should we recursively follow pointers? - realPointers bool // Should we print the real address of pointers? -} - -func formatAny(v reflect.Value, conf FormatConfig, visited map[uintptr]bool) string { - // TODO: Should this be a multi-line printout in certain situations? - - if !v.IsValid() { - return "" - } - if conf.UseStringer && v.Type().Implements(stringerIface) && v.CanInterface() { - if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() { - return "" - } - - const stringerPrefix = "s" // Indicates that the String method was used - s := v.Interface().(fmt.Stringer).String() - return stringerPrefix + formatString(s) - } - - switch v.Kind() { - case reflect.Bool: - return formatPrimitive(v.Type(), v.Bool(), conf) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return formatPrimitive(v.Type(), v.Int(), conf) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr { - // Unnamed uints are usually bytes or words, so use hexadecimal. 
- return formatPrimitive(v.Type(), formatHex(v.Uint()), conf) - } - return formatPrimitive(v.Type(), v.Uint(), conf) - case reflect.Float32, reflect.Float64: - return formatPrimitive(v.Type(), v.Float(), conf) - case reflect.Complex64, reflect.Complex128: - return formatPrimitive(v.Type(), v.Complex(), conf) - case reflect.String: - return formatPrimitive(v.Type(), formatString(v.String()), conf) - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - return formatPointer(v, conf) - case reflect.Ptr: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("(%v)(nil)", v.Type()) - } - return "" - } - if visited[v.Pointer()] || !conf.followPointers { - return formatPointer(v, conf) - } - visited = insertPointer(visited, v.Pointer()) - return "&" + formatAny(v.Elem(), conf, visited) - case reflect.Interface: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("%v(nil)", v.Type()) - } - return "" - } - return formatAny(v.Elem(), conf, visited) - case reflect.Slice: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("%v(nil)", v.Type()) - } - return "" - } - if visited[v.Pointer()] { - return formatPointer(v, conf) - } - visited = insertPointer(visited, v.Pointer()) - fallthrough - case reflect.Array: - var ss []string - subConf := conf - subConf.printType = v.Type().Elem().Kind() == reflect.Interface - for i := 0; i < v.Len(); i++ { - s := formatAny(v.Index(i), subConf, visited) - ss = append(ss, s) - } - s := fmt.Sprintf("{%s}", strings.Join(ss, ", ")) - if conf.printType { - return v.Type().String() + s - } - return s - case reflect.Map: - if v.IsNil() { - if conf.printType { - return fmt.Sprintf("%v(nil)", v.Type()) - } - return "" - } - if visited[v.Pointer()] { - return formatPointer(v, conf) - } - visited = insertPointer(visited, v.Pointer()) - - var ss []string - keyConf, valConf := conf, conf - keyConf.printType = v.Type().Key().Kind() == reflect.Interface - keyConf.followPointers = false - valConf.printType = v.Type().Elem().Kind() == 
reflect.Interface - for _, k := range SortKeys(v.MapKeys()) { - sk := formatAny(k, keyConf, visited) - sv := formatAny(v.MapIndex(k), valConf, visited) - ss = append(ss, fmt.Sprintf("%s: %s", sk, sv)) - } - s := fmt.Sprintf("{%s}", strings.Join(ss, ", ")) - if conf.printType { - return v.Type().String() + s - } - return s - case reflect.Struct: - var ss []string - subConf := conf - subConf.printType = true - for i := 0; i < v.NumField(); i++ { - vv := v.Field(i) - if isZero(vv) { - continue // Elide zero value fields - } - name := v.Type().Field(i).Name - subConf.UseStringer = conf.UseStringer - s := formatAny(vv, subConf, visited) - ss = append(ss, fmt.Sprintf("%s: %s", name, s)) - } - s := fmt.Sprintf("{%s}", strings.Join(ss, ", ")) - if conf.printType { - return v.Type().String() + s - } - return s - default: - panic(fmt.Sprintf("%v kind not handled", v.Kind())) - } -} - -func formatString(s string) string { - // Use quoted string if it the same length as a raw string literal. - // Otherwise, attempt to use the raw string form. - qs := strconv.Quote(s) - if len(qs) == 1+len(s)+1 { - return qs - } - - // Disallow newlines to ensure output is a single line. - // Only allow printable runes for readability purposes. 
- rawInvalid := func(r rune) bool { - return r == '`' || r == '\n' || !unicode.IsPrint(r) - } - if strings.IndexFunc(s, rawInvalid) < 0 { - return "`" + s + "`" - } - return qs -} - -func formatPrimitive(t reflect.Type, v interface{}, conf FormatConfig) string { - if conf.printType && (conf.PrintPrimitiveType || t.PkgPath() != "") { - return fmt.Sprintf("%v(%v)", t, v) - } - return fmt.Sprintf("%v", v) -} - -func formatPointer(v reflect.Value, conf FormatConfig) string { - p := v.Pointer() - if !conf.realPointers { - p = 0 // For deterministic printing purposes - } - s := formatHex(uint64(p)) - if conf.printType { - return fmt.Sprintf("(%v)(%s)", v.Type(), s) - } - return s -} - -func formatHex(u uint64) string { - var f string - switch { - case u <= 0xff: - f = "0x%02x" - case u <= 0xffff: - f = "0x%04x" - case u <= 0xffffff: - f = "0x%06x" - case u <= 0xffffffff: - f = "0x%08x" - case u <= 0xffffffffff: - f = "0x%010x" - case u <= 0xffffffffffff: - f = "0x%012x" - case u <= 0xffffffffffffff: - f = "0x%014x" - case u <= 0xffffffffffffffff: - f = "0x%016x" - } - return fmt.Sprintf(f, u) -} - -// insertPointer insert p into m, allocating m if necessary. -func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool { - if m == nil { - m = make(map[uintptr]bool) - } - m[p] = true - return m -} - -// isZero reports whether v is the zero value. -// This does not rely on Interface and so can be used on unexported fields. 
-func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Complex64, reflect.Complex128: - return v.Complex() == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 00000000000..0a01c4796f1 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,23 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. 
+ return Pointer{v.Pointer(), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 00000000000..da134ae2a80 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,26 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go index fe8aa27a077..938f646f000 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -19,7 +19,7 @@ func SortKeys(vs []reflect.Value) []reflect.Value { } // Sort the map keys. - sort.Sort(valueSorter(vs)) + sort.Slice(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) // Deduplicate keys (fails for NaNs). vs2 := vs[:1] @@ -31,13 +31,6 @@ func SortKeys(vs []reflect.Value) []reflect.Value { return vs2 } -// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above. 
-type valueSorter []reflect.Value - -func (vs valueSorter) Len() int { return len(vs) } -func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) } -func (vs valueSorter) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } - // isLess is a generic function for sorting arbitrary map keys. // The inputs must be of the same type and must be comparable. func isLess(x, y reflect.Value) bool { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go new file mode 100644 index 00000000000..d13a12ccfcd --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -0,0 +1,45 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import "reflect" + +// IsZero reports whether v is the zero value. +// This does not rely on Interface and so can be used on unexported fields. 
+func IsZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() == 0 + case reflect.String: + return v.String() == "" + case reflect.UnsafePointer: + return v.Pointer() == 0 + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !IsZero(v.Index(i)) { + return false + } + } + return true + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !IsZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index 91d4b066e05..793448160ee 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -7,7 +7,7 @@ package cmp import ( "fmt" "reflect" - "runtime" + "regexp" "strings" "github.com/google/go-cmp/cmp/internal/function" @@ -29,11 +29,11 @@ type Option interface { // An Options is returned only if multiple comparers or transformers // can apply simultaneously and will only contain values of those types // or sub-Options containing values of those types. 
- filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption } // applicableOption represents the following types: -// Fundamental: ignore | invalid | *comparer | *transformer +// Fundamental: ignore | validator | *comparer | *transformer // Grouping: Options type applicableOption interface { Option @@ -43,7 +43,7 @@ type applicableOption interface { } // coreOption represents the following types: -// Fundamental: ignore | invalid | *comparer | *transformer +// Fundamental: ignore | validator | *comparer | *transformer // Filters: *pathFilter | *valuesFilter type coreOption interface { Option @@ -63,19 +63,19 @@ func (core) isCore() {} // on all individual options held within. type Options []Option -func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) { +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { for _, opt := range opts { - switch opt := opt.filter(s, vx, vy, t); opt.(type) { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { case ignore: return ignore{} // Only ignore can short-circuit evaluation - case invalid: - out = invalid{} // Takes precedence over comparer or transformer + case validator: + out = validator{} // Takes precedence over comparer or transformer case *comparer, *transformer, Options: switch out.(type) { case nil: out = opt - case invalid: - // Keep invalid + case validator: + // Keep validator case *comparer, *transformer, Options: out = Options{out, opt} // Conflicting comparers or transformers } @@ -106,6 +106,11 @@ func (opts Options) String() string { // FilterPath returns a new Option where opt is only evaluated if filter f // returns true for the current Path in the value tree. // +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. 
The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// // The option passed in may be an Ignore, Transformer, Comparer, Options, or // a previously filtered Option. func FilterPath(f func(Path) bool, opt Option) Option { @@ -124,22 +129,22 @@ type pathFilter struct { opt Option } -func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { if f.fnc(s.curPath) { - return f.opt.filter(s, vx, vy, t) + return f.opt.filter(s, t, vx, vy) } return nil } func (f pathFilter) String() string { - fn := getFuncName(reflect.ValueOf(f.fnc).Pointer()) - return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt) + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) } // FilterValues returns a new Option where opt is only evaluated if filter f, // which is a function of the form "func(T, T) bool", returns true for the -// current pair of values being compared. If the type of the values is not -// assignable to T, then this filter implicitly returns false. +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. 
// // The filter function must be // symmetric (i.e., agnostic to the order of the inputs) and @@ -171,19 +176,18 @@ type valuesFilter struct { opt Option } -func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { - if !vx.IsValid() || !vy.IsValid() { - return invalid{} +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil } if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { - return f.opt.filter(s, vx, vy, t) + return f.opt.filter(s, t, vx, vy) } return nil } func (f valuesFilter) String() string { - fn := getFuncName(f.fnc.Pointer()) - return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt) + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) } // Ignore is an Option that causes all comparisons to be ignored. @@ -194,20 +198,45 @@ func Ignore() Option { return ignore{} } type ignore struct{ core } func (ignore) isFiltered() bool { return false } -func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} } -func (ignore) apply(_ *state, _, _ reflect.Value) { return } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } func (ignore) String() string { return "Ignore()" } -// invalid is a sentinel Option type to indicate that some options could not -// be evaluated due to unexported fields. -type invalid struct{ core } +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. 
+type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported" + panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + } -func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} } -func (invalid) apply(s *state, _, _ reflect.Value) { - const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported" - panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + panic("not reachable") } +// identRx represents a valid identifier according to the Go specification. +const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + // Transformer returns an Option that applies a transformation function that // converts values of a certain type into that of another. // @@ -220,18 +249,25 @@ func (invalid) apply(s *state, _, _ reflect.Value) { // input and output types are the same), an implicit filter is added such that // a transformer is applicable only if that exact transformer is not already // in the tail of the Path since the last non-Transform step. 
+// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. // // The name is a user provided label that is used as the Transform.Name in the -// transformation PathStep. If empty, an arbitrary name is used. +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. func Transformer(name string, f interface{}) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { panic(fmt.Sprintf("invalid transformer function: %T", f)) } if name == "" { - name = "λ" // Lambda-symbol as place-holder for anonymous transformer - } - if !isValid(name) { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { panic(fmt.Sprintf("invalid name: %q", name)) } tr := &transformer{name: name, fnc: reflect.ValueOf(f)} @@ -250,9 +286,9 @@ type transformer struct { func (tr *transformer) isFiltered() bool { return tr.typ != nil } -func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) applicableOption { +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { for i := len(s.curPath) - 1; i >= 0; i-- { - if t, ok := s.curPath[i].(*transform); !ok { + if t, ok := s.curPath[i].(Transform); !ok { break // Hit most recent non-Transform step } else if tr == t.trans { return nil // Cannot directly use same Transform @@ -265,18 +301,15 @@ func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) appl } func (tr *transformer) apply(s *state, vx, vy reflect.Value) { - // Update path before calling the Transformer so that dynamic checks - // will use the updated path. 
- s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr}) - defer s.curPath.pop() - - vx = s.callTRFunc(tr.fnc, vx) - vy = s.callTRFunc(tr.fnc, vy) - s.compareAny(vx, vy) + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) } func (tr transformer) String() string { - return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer())) + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) } // Comparer returns an Option that determines whether two values are equal @@ -311,7 +344,7 @@ type comparer struct { func (cm *comparer) isFiltered() bool { return cm.typ != nil } -func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption { +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { if cm.typ == nil || t.AssignableTo(cm.typ) { return cm } @@ -320,11 +353,11 @@ func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applica func (cm *comparer) apply(s *state, vx, vy reflect.Value) { eq := s.callTTBFunc(cm.fnc, vx, vy) - s.report(eq, vx, vy) + s.report(eq, reportByFunc) } func (cm comparer) String() string { - return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer())) + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) } // AllowUnexported returns an Option that forcibly allows operations on @@ -338,7 +371,7 @@ func (cm comparer) String() string { // defined in an internal package where the semantic meaning of an unexported // field is in the control of the user. // -// For some cases, a custom Comparer should be used instead that defines +// In many cases, a custom Comparer should be used instead that defines // equality as a function of the public API of a type rather than the underlying // unexported implementation. 
// @@ -370,27 +403,92 @@ func AllowUnexported(types ...interface{}) Option { type visibleStructs map[reflect.Type]bool -func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { +func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { panic("not implemented") } -// reporter is an Option that configures how differences are reported. -type reporter interface { - // TODO: Not exported yet. +// Result represents the comparison result for a single node and +// is provided by cmp when calling Result (see Reporter). +type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +} + +// Equal reports whether the node was determined to be equal or not. +// As a special case, ignored nodes are considered equal. +func (r Result) Equal() bool { + return r.flags&(reportEqual|reportByIgnore) != 0 +} + +// ByIgnore reports whether the node is equal because it was ignored. +// This never reports true if Equal reports false. +func (r Result) ByIgnore() bool { + return r.flags&reportByIgnore != 0 +} + +// ByMethod reports whether the Equal method determined equality. +func (r Result) ByMethod() bool { + return r.flags&reportByMethod != 0 +} + +// ByFunc reports whether a Comparer function determined equality. +func (r Result) ByFunc() bool { + return r.flags&reportByFunc != 0 +} + +type resultFlags uint + +const ( + _ resultFlags = (1 << iota) / 2 + + reportEqual + reportUnequal + reportByIgnore + reportByMethod + reportByFunc +) + +// Reporter is an Option that can be passed to Equal. When Equal traverses +// the value trees, it calls PushStep as it descends into each node in the +// tree and PopStep as it ascend out of the node. The leaves of the tree are +// either compared (determined to be equal or not equal) or ignored and reported +// as such by calling the Report method. +func Reporter(r interface { + // PushStep is called when a tree-traversal operation is performed. 
+ // The PathStep itself is only valid until the step is popped. + // The PathStep.Values are valid for the duration of the entire traversal + // and must not be mutated. + // + // Equal always calls PushStep at the start to provide an operation-less + // PathStep used to report the root values. // - // Perhaps add PushStep and PopStep and change Report to only accept - // a PathStep instead of the full-path? Adding a PushStep and PopStep makes - // it clear that we are traversing the value tree in a depth-first-search - // manner, which has an effect on how values are printed. + // Within a slice, the exact set of inserted, removed, or modified elements + // is unspecified and may change in future implementations. + // The entries of a map are iterated through in an unspecified order. + PushStep(PathStep) + + // Report is called exactly once on leaf nodes to report whether the + // comparison identified the node as equal, unequal, or ignored. + // A leaf node is one that is immediately preceded by and followed by + // a pair of PushStep and PopStep calls. + Report(Result) + + // PopStep ascends back up the value tree. + // There is always a matching pop call for every push call. + PopStep() +}) Option { + return reporter{r} +} - Option +type reporter struct{ reporterIface } +type reporterIface interface { + PushStep(PathStep) + Report(Result) + PopStep() +} - // Report is called for every comparison made and will be provided with - // the two values being compared, the equality result, and the - // current path in the value tree. It is possible for x or y to be an - // invalid reflect.Value if one of the values is non-existent; - // which is possible with maps and slices. 
- Report(x, y reflect.Value, eq bool, p Path) +func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") } // normalizeOption normalizes the input options such that all Options groups @@ -424,30 +522,3 @@ func flattenOptions(dst, src Options) Options { } return dst } - -// getFuncName returns a short function name from the pointer. -// The string parsing logic works up until Go1.9. -func getFuncName(p uintptr) string { - fnc := runtime.FuncForPC(p) - if fnc == nil { - return "" - } - name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm" - if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") { - // Strip the package name from method name. - name = strings.TrimSuffix(name, ")-fm") - name = strings.TrimSuffix(name, ")·fm") - if i := strings.LastIndexByte(name, '('); i >= 0 { - methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc" - if j := strings.LastIndexByte(methodName, '.'); j >= 0 { - methodName = methodName[j+1:] // E.g., "myfunc" - } - name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc" - } - } - if i := strings.LastIndexByte(name, '/'); i >= 0 { - // Strip the package name. - name = name[i+1:] // E.g., "mypkg.(mytype).myfunc" - } - return name -} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index c08a3cf80d9..96fffd291f7 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -12,80 +12,52 @@ import ( "unicode/utf8" ) -type ( - // Path is a list of PathSteps describing the sequence of operations to get - // from some root type to the current position in the value tree. - // The first Path element is always an operation-less PathStep that exists - // simply to identify the initial type. 
- // - // When traversing structs with embedded structs, the embedded struct will - // always be accessed as a field before traversing the fields of the - // embedded struct themselves. That is, an exported field from the - // embedded struct will never be accessed directly from the parent struct. - Path []PathStep - - // PathStep is a union-type for specific operations to traverse - // a value's tree structure. Users of this package never need to implement - // these types as values of this type will be returned by this package. - PathStep interface { - String() string - Type() reflect.Type // Resulting type after performing the path step - isPathStep() - } +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep - // SliceIndex is an index operation on a slice or array at some index Key. - SliceIndex interface { - PathStep - Key() int // May return -1 if in a split state - - // SplitKeys returns the indexes for indexing into slices in the - // x and y values, respectively. These indexes may differ due to the - // insertion or removal of an element in one of the slices, causing - // all of the indexes to be shifted. If an index is -1, then that - // indicates that the element does not exist in the associated slice. - // - // Key is guaranteed to return -1 if and only if the indexes returned - // by SplitKeys are not the same. SplitKeys will never return -1 for - // both indexes. 
- SplitKeys() (x int, y int) - - isSliceIndex() - } - // MapIndex is an index operation on a map at some index Key. - MapIndex interface { - PathStep - Key() reflect.Value - isMapIndex() - } - // TypeAssertion represents a type assertion on an interface. - TypeAssertion interface { - PathStep - isTypeAssertion() - } - // StructField represents a struct field access on a field called Name. - StructField interface { - PathStep - Name() string - Index() int - isStructField() - } - // Indirect represents pointer indirection on the parent type. - Indirect interface { - PathStep - isIndirect() - } - // Transform is a transformation from the parent type to the current type. - Transform interface { - PathStep - Name() string - Func() reflect.Value +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string - // Option returns the originally constructed Transformer option. - // The == operator can be used to detect the exact option used. - Option() Option + // Type is the resulting type after performing the path step. + Type() reflect.Type - isTransform() - } + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // • For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // AllowUnexported to traverse unexported fields. + // • For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // • For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. 
+ // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} ) func (pa *Path) push(s PathStep) { @@ -124,7 +96,7 @@ func (pa Path) Index(i int) PathStep { func (pa Path) String() string { var ss []string for _, s := range pa { - if _, ok := s.(*structField); ok { + if _, ok := s.(StructField); ok { ss = append(ss, s.String()) } } @@ -144,13 +116,13 @@ func (pa Path) GoString() string { nextStep = pa[i+1] } switch s := s.(type) { - case *indirect: + case Indirect: numIndirect++ pPre, pPost := "(", ")" switch nextStep.(type) { - case *indirect: + case Indirect: continue // Next step is indirection, so let them batch up - case *structField: + case StructField: numIndirect-- // Automatic indirection on struct fields case nil: pPre, pPost = "", "" // Last step; no need for parenthesis @@ -161,19 +133,10 @@ func (pa Path) GoString() string { } numIndirect = 0 continue - case *transform: + case Transform: ssPre = append(ssPre, s.trans.name+"(") ssPost = append(ssPost, ")") continue - case *typeAssertion: - // As a special-case, elide type assertions on anonymous types - // since they are typically generated dynamically and can be very - // verbose. For example, some transforms return interface{} because - // of Go's lack of generics, but typically take in and return the - // exact same concrete type. 
- if s.Type().PkgPath() == "" { - continue - } } ssPost = append(ssPost, s.String()) } @@ -183,44 +146,13 @@ func (pa Path) GoString() string { return strings.Join(ssPre, "") + strings.Join(ssPost, "") } -type ( - pathStep struct { - typ reflect.Type - } - - sliceIndex struct { - pathStep - xkey, ykey int - } - mapIndex struct { - pathStep - key reflect.Value - } - typeAssertion struct { - pathStep - } - structField struct { - pathStep - name string - idx int - - // These fields are used for forcibly accessing an unexported field. - // pvx, pvy, and field are only valid if unexported is true. - unexported bool - force bool // Forcibly allow visibility - pvx, pvy reflect.Value // Parent values - field reflect.StructField // Field information - } - indirect struct { - pathStep - } - transform struct { - pathStep - trans *transformer - } -) +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} -func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } func (ps pathStep) String() string { if ps.typ == nil { return "" @@ -232,7 +164,54 @@ func (ps pathStep) String() string { return fmt.Sprintf("{%s}", s) } -func (si sliceIndex) String() string { +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. 
+ unexported bool + mayForce bool // Forcibly allow visibility + pvx, pvy reflect.Value // Parent values + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field) + vy = retrieveUnexportedField(sf.pvy, sf.field) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. +func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key. 
+type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { switch { case si.xkey == si.ykey: return fmt.Sprintf("[%d]", si.xkey) @@ -247,63 +226,83 @@ func (si sliceIndex) String() string { return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) } } -func (mi mapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } -func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } -func (sf structField) String() string { return fmt.Sprintf(".%s", sf.name) } -func (in indirect) String() string { return "*" } -func (tf transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } -func (si sliceIndex) Key() int { +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { if si.xkey != si.ykey { return -1 } return si.xkey } -func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey } -func (mi mapIndex) Key() reflect.Value { return mi.key } -func (sf structField) Name() string { return sf.name } -func (sf structField) Index() int { return sf.idx } -func (tf transform) Name() string { return tf.trans.name } -func (tf transform) Func() reflect.Value { return tf.trans.fnc } -func (tf transform) Option() Option { return tf.trans } - -func (pathStep) isPathStep() {} -func (sliceIndex) isSliceIndex() {} -func (mapIndex) isMapIndex() {} -func (typeAssertion) isTypeAssertion() {} -func (structField) isStructField() {} -func (indirect) isIndirect() {} -func (transform) isTransform() {} -var ( - _ SliceIndex = sliceIndex{} - _ MapIndex = mapIndex{} - _ TypeAssertion = typeAssertion{} - _ StructField = structField{} - _ Indirect = indirect{} - _ Transform = transform{} - - _ PathStep = sliceIndex{} - _ PathStep = mapIndex{} - _ PathStep = typeAssertion{} - _ PathStep = structField{} - _ 
PathStep = indirect{} - _ PathStep = transform{} -) +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } + +// Transform is a transformation from the parent type to the current type. 
+type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. +func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. +func (tf Transform) Option() Option { return tf.trans } // isExported reports whether the identifier is exported. func isExported(id string) bool { r, _ := utf8.DecodeRuneInString(id) return unicode.IsUpper(r) } - -// isValid reports whether the identifier is valid. -// Empty and underscore-only strings are not valid. -func isValid(id string) bool { - ok := id != "" && id != "_" - for j, c := range id { - ok = ok && (j > 0 || !unicode.IsDigit(c)) - ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c)) - } - return ok -} diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 00000000000..6ddf29993e5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,51 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). 
+// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + return formatOptions{}.FormatDiff(r.root).String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 00000000000..05efb992c53 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,296 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// TODO: Enforce limits? +// * Enforce maximum number of records to print per node? +// * Enforce maximum size in bytes allowed? +// * As a heuristic, use less verbosity for equal nodes than unequal nodes. +// TODO: Enforce unique outputs? +// * Avoid Stringer methods if it results in same output? +// * Print pointer address if outputs still equal? 
+ +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. + TypeMode typeMode + + // formatValueOptions are options specific to printing reflect.Values. + formatValueOptions +} + +func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { + opts.DiffMode = d + return opts +} +func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { + opts.TypeMode = t + return opts +} + +// FormatDiff converts a valueNode tree into a textNode tree, where the later +// is a textual representation of the differences detected in the former. +func (opts formatOptions) FormatDiff(v *valueNode) textNode { + // Check whether we have specialized formatting for this node. + // This is not necessary, but helpful for producing more readable outputs. + if opts.CanFormatDiffSlice(v) { + return opts.FormatDiffSlice(v) + } + + // For leaf nodes, format the value based on the reflect.Values alone. 
+ if v.MaxDepth == 0 { + switch opts.DiffMode { + case diffUnknown, diffIdentical: + // Format Equal. + if v.NumDiff == 0 { + outx := opts.FormatValue(v.ValueX, visitedPointers{}) + outy := opts.FormatValue(v.ValueY, visitedPointers{}) + if v.NumIgnored > 0 && v.NumSame == 0 { + return textEllipsis + } else if outx.Len() < outy.Len() { + return outx + } else { + return outy + } + } + + // Format unequal. + assert(opts.DiffMode == diffUnknown) + var list textList + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + if outx != nil { + list = append(list, textRecord{Diff: '-', Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: '+', Value: outy}) + } + return opts.WithTypeMode(emitType).FormatType(v.Type, list) + case diffRemoved: + return opts.FormatValue(v.ValueX, visitedPointers{}) + case diffInserted: + return opts.FormatValue(v.ValueY, visitedPointers{}) + default: + panic("invalid diff mode") + } + } + + // Descend into the child value node. + if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: + return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Ptr: + return textWrap{"&", opts.FormatDiff(v.Value), ""} + case reflect.Interface: + return opts.WithTypeMode(emitType).FormatDiff(v.Value) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { + // Derive record name based on the data structure kind. 
+	var name string
+	var formatKey func(reflect.Value) string
+	switch k {
+	case reflect.Struct:
+		name = "field"
+		opts = opts.WithTypeMode(autoType)
+		formatKey = func(v reflect.Value) string { return v.String() }
+	case reflect.Slice, reflect.Array:
+		name = "element"
+		opts = opts.WithTypeMode(elideType)
+		formatKey = func(reflect.Value) string { return "" }
+	case reflect.Map:
+		name = "entry"
+		opts = opts.WithTypeMode(elideType)
+		formatKey = formatMapKey
+	}
+
+	// Handle unification.
+	switch opts.DiffMode {
+	case diffIdentical, diffRemoved, diffInserted:
+		var list textList
+		var deferredEllipsis bool // Add final "..." to indicate records were dropped
+		for _, r := range recs {
+			// Elide struct fields that are zero value.
+			if k == reflect.Struct {
+				var isZero bool
+				switch opts.DiffMode {
+				case diffIdentical:
+					// BUGFIX: upstream checks both sides; the second
+					// operand must be ValueY, not ValueX again.
+					isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
+				case diffRemoved:
+					isZero = value.IsZero(r.Value.ValueX)
+				case diffInserted:
+					isZero = value.IsZero(r.Value.ValueY)
+				}
+				if isZero {
+					continue
+				}
+			}
+			// Elide ignored nodes.
+			if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
+				deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
+				if !deferredEllipsis {
+					list.AppendEllipsis(diffStats{})
+				}
+				continue
+			}
+			if out := opts.FormatDiff(r.Value); out != nil {
+				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+			}
+		}
+		if deferredEllipsis {
+			list.AppendEllipsis(diffStats{})
+		}
+		return textWrap{"{", list, "}"}
+	case diffUnknown:
+	default:
+		panic("invalid diff mode")
+	}
+
+	// Handle differencing.
+	var list textList
+	groups := coalesceAdjacentRecords(name, recs)
+	for i, ds := range groups {
+		// Handle equal records.
+		if ds.NumDiff() == 0 {
+			// Compute the number of leading and trailing records to print.
+ var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. 
+ for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + } + default: + out := opts.FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + recs = recs[ds.NumDiff():] + } + assert(len(recs) == 0) + return textWrap{"{", list, "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. +func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 00000000000..5521c604c54 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,279 @@ +// Copyright 2019, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // ShallowPointers controls whether to avoid descending into pointers. + // Useful when printing map keys, where pointer comparison is performed + // on the pointer address rather than the pointed-at value. + ShallowPointers bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool +} + +// FormatType prints the type as if it were wrapping s. +// This may return s as-is depending on the current type and TypeMode mode. +func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { + // Check whether to emit the type or not. + switch opts.TypeMode { + case autoType: + switch t.Kind() { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + if s.Equal(textNil) { + return s + } + default: + return s + } + case elideType: + return s + } + + // Determine the type label, applying special handling for unnamed types. + typeName := t.String() + if t.Name() == "" { + // According to Go grammar, certain type literals contain symbols that + // do not strongly bind to the next lexicographical token (e.g., *T). + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Ptr: + typeName = "(" + typeName + ")" + } + typeName = strings.Replace(typeName, "struct {", "struct{", -1) + typeName = strings.Replace(typeName, "interface {", "interface{", -1) + } + + // Avoid wrap the value in parenthesis if unnecessary. 
+ if s, ok := s.(textWrap); ok { + hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") + hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + if hasParens || hasBraces { + return textWrap{typeName, s, ""} + } + } + return textWrap{typeName + "(", s, ")"} +} + +// FormatValue prints the reflect.Value, taking extra care to avoid descending +// into pointers already in m. As pointers are visited, m is also updated. +func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + switch v := v.Interface().(type) { + case error: + return textLine("e" + formatString(v.Error())) + case fmt.Stringer: + return textLine("s" + formatString(v.String())) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + var ptr string + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + // Unnamed uints are usually bytes or words, so use hexadecimal. 
+ if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return textLine(formatString(v.String())) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(v)) + case reflect.Struct: + var list textList + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if value.IsZero(vv) { + continue // Elide fields with zero values + } + s := opts.WithTypeMode(autoType).FormatValue(vv, m) + list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + } + return textWrap{"{", list, "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + fallthrough + case reflect.Array: + var list textList + for i := 0; i < v.Len(); i++ { + vi := v.Index(i) + if vi.CanAddr() { // Check for cyclic elements + p := vi.Addr() + if m.Visit(p) { + var out textNode + out = textLine(formatPointer(p)) + out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) + out = textWrap{"*", out, ""} + list = append(list, textRecord{Value: out}) + continue + } + } + s := opts.WithTypeMode(elideType).FormatValue(vi, m) + list = append(list, textRecord{Value: s}) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Map: + if v.IsNil() { + return textNil + } + if m.Visit(v) { + return textLine(formatPointer(v)) + } + + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + sk := formatMapKey(k) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + list = append(list, textRecord{Key: sk, Value: sv}) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Ptr: + if v.IsNil() { + return textNil + } + if m.Visit(v) || opts.ShallowPointers { + 
return textLine(formatPointer(v)) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + skipType = true // Let the underlying value print the type instead + return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying value to explicitly print the type. + skipType = true // Print the concrete type instead + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +// formatMapKey formats v as if it were a map key. +// The result is guaranteed to be a single line. +func formatMapKey(v reflect.Value) string { + var opts formatOptions + opts.TypeMode = elideType + opts.AvoidStringer = true + opts.ShallowPointers = true + s := opts.FormatValue(v, visitedPointers{}).String() + return strings.TrimSpace(s) +} + +// formatString prints s as a double-quoted or backtick-quoted string. +func formatString(s string) string { + // Use quoted string if it the same length as a raw string literal. + // Otherwise, attempt to use the raw string form. + qs := strconv.Quote(s) + if len(qs) == 1+len(s)+1 { + return qs + } + + // Disallow newlines to ensure output is a single line. + // Only allow printable runes for readability purposes. + rawInvalid := func(r rune) bool { + return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') + } + if strings.IndexFunc(s, rawInvalid) < 0 { + return "`" + s + "`" + } + return qs +} + +// formatHex prints u as a hexadecimal integer in Go notation. 
+func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} + +// formatPointer prints the address of the pointer. +func formatPointer(v reflect.Value) string { + p := v.Pointer() + if flags.Deterministic { + p = 0xdeadf00f // Only used for stable testing purposes + } + return fmt.Sprintf("⟪0x%x⟫", p) +} + +type visitedPointers map[value.Pointer]struct{} + +// Visit inserts pointer v into the visited map and reports whether it had +// already been visited before. +func (m visitedPointers) Visit(v reflect.Value) bool { + p := value.PointerOf(v) + _, visited := m[p] + m[p] = struct{}{} + return visited +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 00000000000..8cb3265e767 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,333 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. 
+func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: + // TODO: Handle the case where someone uses bytes.Equal on a large slice. + return false // Some custom option was used to determined equality + case !v.ValueX.IsValid() || !v.ValueY.IsValid(): + return false // Both values must be valid + } + + switch t := v.Type; t.Kind() { + case reflect.String: + case reflect.Array, reflect.Slice: + // Only slices of primitive types have specialized handling. + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + default: + return false + } + + // If a sufficient number of elements already differ, + // use specialized formatting even if length requirement is not met. + if v.NumDiff > v.NumSame { + return true + } + default: + return false + } + + // Use specialized string diffing for longer slices or strings. + const minLength = 64 + return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength +} + +// FormatDiffSlice prints a diff for the slices (or strings) represented by v. +// This provides custom-tailored logic to make printing of differences in +// textual strings and slices of primitive kinds more readable. +func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { + assert(opts.DiffMode == diffUnknown) + t, vx, vy := v.Type, v.ValueX, v.ValueY + + // Auto-detect the type of the data. 
+	var isLinedText, isText, isBinary bool
+	var sx, sy string
+	switch {
+	case t.Kind() == reflect.String:
+		sx, sy = vx.String(), vy.String()
+		isText = true // Initial estimate, verify later
+	case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
+		sx, sy = string(vx.Bytes()), string(vy.Bytes())
+		isBinary = true // Initial estimate, verify later
+	case t.Kind() == reflect.Array:
+		// Arrays need to be addressable for slice operations to work.
+		vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
+		vx2.Set(vx)
+		vy2.Set(vy)
+		vx, vy = vx2, vy2
+	}
+	if isText || isBinary {
+		var numLines, lastLineIdx, maxLineLen int
+		isBinary = false
+		for i, r := range sx + sy {
+			if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError {
+				isBinary = true
+				break
+			}
+			if r == '\n' {
+				// BUGFIX: track the longest line seen so far; the
+				// assignment target must be maxLineLen, not lastLineIdx.
+				if maxLineLen < i-lastLineIdx {
+					maxLineLen = i - lastLineIdx
+				}
+				lastLineIdx = i + 1
+				numLines++
+			}
+		}
+		isText = !isBinary
+		isLinedText = isText && numLines >= 4 && maxLineLen <= 256
+	}
+
+	// Format the string into printable records.
+	var list textList
+	var delim string
+	switch {
+	// If the text appears to be multi-lined text,
+	// then perform differencing across individual lines.
+	case isLinedText:
+		ssx := strings.Split(sx, "\n")
+		ssy := strings.Split(sy, "\n")
+		list = opts.formatDiffSlice(
+			reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
+			func(v reflect.Value, d diffMode) textRecord {
+				s := formatString(v.Index(0).String())
+				return textRecord{Diff: d, Value: textLine(s)}
+			},
+		)
+		delim = "\n"
+	// If the text appears to be single-lined text,
+	// then perform differencing in approximately fixed-sized chunks.
+	// The output is printed as quoted strings.
+ case isText: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "" + // If the text appears to be binary data, + // then perform differencing in approximately fixed-sized chunks. + // The output is inspired by hexdump. + case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. 
+ default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = textWrap{"{", list, "}"} + if !isText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf([]byte(nil)) { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. 
+func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { + return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + for i, ds := range groups { + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. 
+ nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + } + assert(vx.Len() == 0 && vy.Len() == 0) + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats(1).NumIdentical++ + case diff.UniqueX: + lastStats(2).NumRemoved++ + case diff.UniqueY: + lastStats(2).NumInserted++ + case diff.Modified: + lastStats(2).NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. 
+func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { + groups, groupsOrig := groups[:0], groups + for i, ds := range groupsOrig { + if len(groups) >= 2 && ds.NumDiff() > 0 { + prev := &groups[len(groups)-2] // Unequal group + curr := &groups[len(groups)-1] // Equal group + next := &groupsOrig[i] // Unequal group + hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 + hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 + if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { + *prev = (*prev).Append(*curr).Append(*next) + groups = groups[:len(groups)-1] // Truncate off equal group + continue + } + } + groups = append(groups, ds) + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 00000000000..80605d0e440 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,382 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) 
+ } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. + Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. + formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. 
+type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" +} + +func (s textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) + if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. +// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. 
+func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := ds != diffStats{} + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return textWrap{"{", s, "}"}.String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) + } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. 
+ if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return len(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return len(r.Value.(textLine)) }, + ) + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) + } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.Value.Equal(textEllipsis) { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. 
+type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. +// +// Example: +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name = name + "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). 
+ switch n := len(ss); n { + case 0: + return "" + case 1, 2: + return strings.Join(ss, " and ") + " " + name + default: + return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name + } +} + +type commentString string + +func (s commentString) String() string { return string(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go new file mode 100644 index 00000000000..83031a7f507 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -0,0 +1,121 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import "reflect" + +// valueNode represents a single node within a report, which is a +// structured representation of the value tree, containing information +// regarding which nodes are equal or not. +type valueNode struct { + parent *valueNode + + Type reflect.Type + ValueX reflect.Value + ValueY reflect.Value + + // NumSame is the number of leaf nodes that are equal. + // All descendants are equal only if NumDiff is 0. + NumSame int + // NumDiff is the number of leaf nodes that are not equal. + NumDiff int + // NumIgnored is the number of leaf nodes that are ignored. + NumIgnored int + // NumCompared is the number of leaf nodes that were compared + // using an Equal method or Comparer function. + NumCompared int + // NumTransformed is the number of non-leaf nodes that were transformed. + NumTransformed int + // NumChildren is the number of transitive descendants of this node. + // This counts from zero; thus, leaf nodes have no descendants. + NumChildren int + // MaxDepth is the maximum depth of the tree. This counts from zero; + // thus, leaf nodes have a depth of zero. + MaxDepth int + + // Records is a list of struct fields, slice elements, or map entries. 
+ Records []reportRecord // If populated, implies Value is not populated + + // Value is the result of a transformation, pointer indirect, of + // type assertion. + Value *valueNode // If populated, implies Records is not populated + + // TransformerName is the name of the transformer. + TransformerName string // If non-empty, implies Value is populated +} +type reportRecord struct { + Key reflect.Value // Invalid for slice element + Value *valueNode +} + +func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { + vx, vy := ps.Values() + child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} + switch s := ps.(type) { + case StructField: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) + case SliceIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Value: child}) + case MapIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) + case Indirect: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case TypeAssertion: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case Transform: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + parent.TransformerName = s.Name() + parent.NumTransformed++ + default: + assert(parent == nil) // Must be the root step + } + return child +} + +func (r *valueNode) Report(rs Result) { + assert(r.MaxDepth == 0) // May only be called on leaf nodes + + if rs.ByIgnore() { + r.NumIgnored++ + } else { + if rs.Equal() { + r.NumSame++ + } else { + r.NumDiff++ + } + } + assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) + + if rs.ByMethod() { + r.NumCompared++ + } + if rs.ByFunc() { + r.NumCompared++ + } + assert(r.NumCompared <= 1) +} + +func (child *valueNode) PopStep() (parent *valueNode) { + if child.parent == nil { + return nil + } + parent = 
child.parent + parent.NumSame += child.NumSame + parent.NumDiff += child.NumDiff + parent.NumIgnored += child.NumIgnored + parent.NumCompared += child.NumCompared + parent.NumTransformed += child.NumTransformed + parent.NumChildren += child.NumChildren + 1 + if parent.MaxDepth < child.MaxDepth+1 { + parent.MaxDepth = child.MaxDepth + 1 + } + return parent +} diff --git a/vendor/github.com/google/go-cmp/cmp/reporter.go b/vendor/github.com/google/go-cmp/cmp/reporter.go deleted file mode 100644 index 20e9f18e0dd..00000000000 --- a/vendor/github.com/google/go-cmp/cmp/reporter.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package cmp - -import ( - "fmt" - "reflect" - "strings" - - "github.com/google/go-cmp/cmp/internal/value" -) - -type defaultReporter struct { - Option - diffs []string // List of differences, possibly truncated - ndiffs int // Total number of differences - nbytes int // Number of bytes in diffs - nlines int // Number of lines in diffs -} - -var _ reporter = (*defaultReporter)(nil) - -func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) { - if eq { - return // Ignore equal results - } - const maxBytes = 4096 - const maxLines = 256 - r.ndiffs++ - if r.nbytes < maxBytes && r.nlines < maxLines { - sx := value.Format(x, value.FormatConfig{UseStringer: true}) - sy := value.Format(y, value.FormatConfig{UseStringer: true}) - if sx == sy { - // Unhelpful output, so use more exact formatting. 
- sx = value.Format(x, value.FormatConfig{PrintPrimitiveType: true}) - sy = value.Format(y, value.FormatConfig{PrintPrimitiveType: true}) - } - s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy) - r.diffs = append(r.diffs, s) - r.nbytes += len(s) - r.nlines += strings.Count(s, "\n") - } -} - -func (r *defaultReporter) String() string { - s := strings.Join(r.diffs, "") - if r.ndiffs == len(r.diffs) { - return s - } - return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs)) -} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go index 93a94204fce..0e6cba93d68 100644 --- a/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go +++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go @@ -64,7 +64,15 @@ func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, Subject: &req.SourceAddrRange, }) } - if !req.VersionConstraint.Required.Check(record.Version) { + if len(req.VersionConstraint.Required) > 0 && record.Version == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module version requirements have changed", + Detail: "The version requirements have changed since this module was installed and the installed version is no longer acceptable. 
Run \"terraform init\" to install all modules required by this configuration.", + Subject: &req.SourceAddrRange, + }) + } + if record.Version != nil && !req.VersionConstraint.Required.Check(record.Version) { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Module version requirements have changed", diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go index 6a3c15a6465..26b180e0322 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go @@ -1365,10 +1365,12 @@ func (m schemaMap) validate( "%q: this field cannot be set", k)} } - if raw == config.UnknownVariableValue { - // If the value is unknown then we can't validate it yet. - // In particular, this avoids spurious type errors where downstream - // validation code sees UnknownVariableValue as being just a string. + // If the value is unknown then we can't validate it yet. + // In particular, this avoids spurious type errors where downstream + // validation code sees UnknownVariableValue as being just a string. + // The SDK has to allow the unknown value through initially, so that + // Required fields set via an interpolated value are accepted. 
+ if !isWhollyKnown(raw) { return nil, nil } @@ -1380,6 +1382,28 @@ func (m schemaMap) validate( return m.validateType(k, raw, schema, c) } +// isWhollyKnown returns false if the argument contains an UnknownVariableValue +func isWhollyKnown(raw interface{}) bool { + switch raw := raw.(type) { + case string: + if raw == config.UnknownVariableValue { + return false + } + case []interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + case map[string]interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + } + return true +} func (m schemaMap) validateConflictingAttributes( k string, schema *Schema, diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go index 2f2463a5cdb..47a02565922 100644 --- a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go +++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go @@ -55,10 +55,11 @@ func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[st }, } content, _, _ = body.PartialContent(&probeSchema) - if len(content.Blocks) > 0 { - // No attribute present and at least one block present, so - // we'll need to rewrite this one as a block for a successful - // result. + if len(content.Blocks) > 0 || dynamicExpanded { + // A dynamic block with an empty iterator returns nothing. + // If there's no attribute and we have either a block or a + // dynamic expansion, we need to rewrite this one as a + // block for a successful result. 
appearsAsBlock[name] = struct{}{} } } diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go index 71b7a846667..ab68a641197 100644 --- a/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go +++ b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go @@ -363,6 +363,9 @@ var DistinctFunc = function.New(&function.Spec{ } } + if len(list) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } return cty.ListVal(list), nil }, }) @@ -797,10 +800,12 @@ var MatchkeysFunc = function.New(&function.Spec{ }, }, Type: func(args []cty.Value) (cty.Type, error) { - if !args[1].Type().Equals(args[2].Type()) { - return cty.NilType, errors.New("lists must be of the same type") + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + if ty == cty.NilType { + return cty.NilType, errors.New("keys and searchset must be of the same type") } + // the return type is based on args[0] (values) return args[0].Type(), nil }, Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { @@ -813,10 +818,14 @@ var MatchkeysFunc = function.New(&function.Spec{ } output := make([]cty.Value, 0) - values := args[0] - keys := args[1] - searchset := args[2] + + // Keys and searchset must be the same type. + // We can skip error checking here because we've already verified that + // they can be unified in the Type function + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + keys, _ := convert.Convert(args[1], ty) + searchset, _ := convert.Convert(args[2], ty) // if searchset is empty, return an empty list. 
if searchset.LengthInt() == 0 { diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go index 3a0570c5b62..5148455854b 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go @@ -238,21 +238,31 @@ func (n *NodeAbstractResourceInstance) References() []*addrs.Reference { // need to do a little work here to massage this to the form we now // want. var result []*addrs.Reference - for _, addr := range s.Current.Dependencies { - if addr == nil { - // Should never happen; indicates a bug in the state loader - panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr())) - } - // This is a little weird: we need to manufacture an addrs.Reference - // with a fake range here because the state isn't something we can - // make source references into. - result = append(result, &addrs.Reference{ - Subject: addr, - SourceRange: tfdiags.SourceRange{ - Filename: "(state file)", - }, - }) + // It is (apparently) possible for s.Current to be nil. This proved + // difficult to reproduce, so we will fix the symptom here and hope + // to find the root cause another time. + // + // https://github.com/hashicorp/terraform/issues/21407 + if s.Current == nil { + log.Printf("[WARN] no current state found for %s", n.Name()) + } else { + for _, addr := range s.Current.Dependencies { + if addr == nil { + // Should never happen; indicates a bug in the state loader + panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr())) + } + + // This is a little weird: we need to manufacture an addrs.Reference + // with a fake range here because the state isn't something we can + // make source references into. 
+ result = append(result, &addrs.Reference{ + Subject: addr, + SourceRange: tfdiags.SourceRange{ + Filename: "(state file)", + }, + }) + } } return result } diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go index 30d72844a4a..ae8f127d5ce 100644 --- a/vendor/github.com/hashicorp/terraform/version/version.go +++ b/vendor/github.com/hashicorp/terraform/version/version.go @@ -11,7 +11,7 @@ import ( ) // The main version number that is being run at the moment. -var Version = "0.12.0" +var Version = "0.12.1" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go index 4d8a6e5e7d9..8eb73162593 100644 --- a/vendor/golang.org/x/tools/go/analysis/analysis.go +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -128,10 +128,32 @@ type Pass struct { // See comments for ExportObjectFact. ExportPackageFact func(fact Fact) + // AllPackageFacts returns a new slice containing all package facts in unspecified order. + // WARNING: This is an experimental API and may change in the future. + AllPackageFacts func() []PackageFact + + // AllObjectFacts returns a new slice containing all object facts in unspecified order. + // WARNING: This is an experimental API and may change in the future. + AllObjectFacts func() []ObjectFact + /* Further fields may be added in future. */ // For example, suggested or applied refactorings. } +// PackageFact is a package together with an associated fact. +// WARNING: This is an experimental API and may change in the future. +type PackageFact struct { + Package *types.Package + Fact Fact +} + +// ObjectFact is an object together with an associated fact. +// WARNING: This is an experimental API and may change in the future. 
+type ObjectFact struct { + Object types.Object + Fact Fact +} + // Reportf is a helper function that reports a Diagnostic using the // specified position and formatted error message. func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) { diff --git a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go new file mode 100644 index 00000000000..a03a185fc0a --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go @@ -0,0 +1,344 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analysisflags defines helpers for processing flags of +// analysis driver tools. +package analysisflags + +import ( + "crypto/sha256" + "encoding/json" + "flag" + "fmt" + "go/token" + "io" + "io/ioutil" + "log" + "os" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// flags common to all {single,multi,unit}checkers. +var ( + JSON = false // -json + Context = -1 // -c=N: if N>0, display offending line plus N lines of context +) + +// Parse creates a flag for each of the analyzer's flags, +// including (in multi mode) a flag named after the analyzer, +// parses the flags, then filters and returns the list of +// analyzers enabled by flags. +func Parse(analyzers []*analysis.Analyzer, multi bool) []*analysis.Analyzer { + // Connect each analysis flag to the command line as -analysis.flag. + enabled := make(map[*analysis.Analyzer]*triState) + for _, a := range analyzers { + var prefix string + + // Add -NAME flag to enable it. + if multi { + prefix = a.Name + "." 
+ + enable := new(triState) + enableUsage := "enable " + a.Name + " analysis" + flag.Var(enable, a.Name, enableUsage) + enabled[a] = enable + } + + a.Flags.VisitAll(func(f *flag.Flag) { + if !multi && flag.Lookup(f.Name) != nil { + log.Printf("%s flag -%s would conflict with driver; skipping", a.Name, f.Name) + return + } + + name := prefix + f.Name + flag.Var(f.Value, name, f.Usage) + }) + } + + // standard flags: -flags, -V. + printflags := flag.Bool("flags", false, "print analyzer flags in JSON") + addVersionFlag() + + // flags common to all checkers + flag.BoolVar(&JSON, "json", JSON, "emit JSON output") + flag.IntVar(&Context, "c", Context, `display offending line with this many lines of context`) + + // Add shims for legacy vet flags to enable existing + // scripts that run vet to continue to work. + _ = flag.Bool("source", false, "no effect (deprecated)") + _ = flag.Bool("v", false, "no effect (deprecated)") + _ = flag.Bool("all", false, "no effect (deprecated)") + _ = flag.String("tags", "", "no effect (deprecated)") + for old, new := range vetLegacyFlags { + newFlag := flag.Lookup(new) + if newFlag != nil && flag.Lookup(old) == nil { + flag.Var(newFlag.Value, old, "deprecated alias for -"+new) + } + } + + flag.Parse() // (ExitOnError) + + // -flags: print flags so that go vet knows which ones are legitimate. + if *printflags { + printFlags() + os.Exit(0) + } + + // If any -NAME flag is true, run only those analyzers. Otherwise, + // if any -NAME flag is false, run all but those analyzers. 
+ if multi { + var hasTrue, hasFalse bool + for _, ts := range enabled { + switch *ts { + case setTrue: + hasTrue = true + case setFalse: + hasFalse = true + } + } + + var keep []*analysis.Analyzer + if hasTrue { + for _, a := range analyzers { + if *enabled[a] == setTrue { + keep = append(keep, a) + } + } + analyzers = keep + } else if hasFalse { + for _, a := range analyzers { + if *enabled[a] != setFalse { + keep = append(keep, a) + } + } + analyzers = keep + } + } + + return analyzers +} + +func printFlags() { + type jsonFlag struct { + Name string + Bool bool + Usage string + } + var flags []jsonFlag = nil + flag.VisitAll(func(f *flag.Flag) { + // Don't report {single,multi}checker debugging + // flags as these have no effect on unitchecker + // (as invoked by 'go vet'). + switch f.Name { + case "debug", "cpuprofile", "memprofile", "trace": + return + } + + b, ok := f.Value.(interface{ IsBoolFlag() bool }) + isBool := ok && b.IsBoolFlag() + flags = append(flags, jsonFlag{f.Name, isBool, f.Usage}) + }) + data, err := json.MarshalIndent(flags, "", "\t") + if err != nil { + log.Fatal(err) + } + os.Stdout.Write(data) +} + +// addVersionFlag registers a -V flag that, if set, +// prints the executable version and exits 0. +// +// If the -V flag already exists — for example, because it was already +// registered by a call to cmd/internal/objabi.AddVersionFlag — then +// addVersionFlag does nothing. +func addVersionFlag() { + if flag.Lookup("V") == nil { + flag.Var(versionFlag{}, "V", "print version and exit") + } +} + +// versionFlag minimally complies with the -V protocol required by "go vet". 
+type versionFlag struct{} + +func (versionFlag) IsBoolFlag() bool { return true } +func (versionFlag) Get() interface{} { return nil } +func (versionFlag) String() string { return "" } +func (versionFlag) Set(s string) error { + if s != "full" { + log.Fatalf("unsupported flag value: -V=%s", s) + } + + // This replicates the miminal subset of + // cmd/internal/objabi.AddVersionFlag, which is private to the + // go tool yet forms part of our command-line interface. + // TODO(adonovan): clarify the contract. + + // Print the tool version so the build system can track changes. + // Formats: + // $progname version devel ... buildID=... + // $progname version go1.9.1 + progname := os.Args[0] + f, err := os.Open(progname) + if err != nil { + log.Fatal(err) + } + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + log.Fatal(err) + } + f.Close() + fmt.Printf("%s version devel comments-go-here buildID=%02x\n", + progname, string(h.Sum(nil))) + os.Exit(0) + return nil +} + +// A triState is a boolean that knows whether +// it has been set to either true or false. +// It is used to identify whether a flag appears; +// the standard boolean flag cannot +// distinguish missing from unset. +// It also satisfies flag.Value. +type triState int + +const ( + unset triState = iota + setTrue + setFalse +) + +func triStateFlag(name string, value triState, usage string) *triState { + flag.Var(&value, name, usage) + return &value +} + +// triState implements flag.Value, flag.Getter, and flag.boolFlag. 
+// They work like boolean flags: we can say vet -printf as well as vet -printf=true +func (ts *triState) Get() interface{} { + return *ts == setTrue +} + +func (ts triState) isTrue() bool { + return ts == setTrue +} + +func (ts *triState) Set(value string) error { + b, err := strconv.ParseBool(value) + if err != nil { + // This error message looks poor but package "flag" adds + // "invalid boolean value %q for -NAME: %s" + return fmt.Errorf("want true or false") + } + if b { + *ts = setTrue + } else { + *ts = setFalse + } + return nil +} + +func (ts *triState) String() string { + switch *ts { + case unset: + return "true" + case setTrue: + return "true" + case setFalse: + return "false" + } + panic("not reached") +} + +func (ts triState) IsBoolFlag() bool { + return true +} + +// Legacy flag support + +// vetLegacyFlags maps flags used by legacy vet to their corresponding +// new names. The old names will continue to work. +var vetLegacyFlags = map[string]string{ + // Analyzer name changes + "bool": "bools", + "buildtags": "buildtag", + "methods": "stdmethods", + "rangeloops": "loopclosure", + + // Analyzer flags + "compositewhitelist": "composites.whitelist", + "printfuncs": "printf.funcs", + "shadowstrict": "shadow.strict", + "unusedfuncs": "unusedresult.funcs", + "unusedstringmethods": "unusedresult.stringmethods", +} + +// ---- output helpers common to all drivers ---- + +// PrintPlain prints a diagnostic in plain text form, +// with context specified by the -c flag. +func PrintPlain(fset *token.FileSet, diag analysis.Diagnostic) { + posn := fset.Position(diag.Pos) + fmt.Fprintf(os.Stderr, "%s: %s\n", posn, diag.Message) + + // -c=N: show offending line plus N lines of context. 
+ if Context >= 0 { + data, _ := ioutil.ReadFile(posn.Filename) + lines := strings.Split(string(data), "\n") + for i := posn.Line - Context; i <= posn.Line+Context; i++ { + if 1 <= i && i <= len(lines) { + fmt.Fprintf(os.Stderr, "%d\t%s\n", i, lines[i-1]) + } + } + } +} + +// A JSONTree is a mapping from package ID to analysis name to result. +// Each result is either a jsonError or a list of jsonDiagnostic. +type JSONTree map[string]map[string]interface{} + +// Add adds the result of analysis 'name' on package 'id'. +// The result is either a list of diagnostics or an error. +func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.Diagnostic, err error) { + var v interface{} + if err != nil { + type jsonError struct { + Err string `json:"error"` + } + v = jsonError{err.Error()} + } else if len(diags) > 0 { + type jsonDiagnostic struct { + Category string `json:"category,omitempty"` + Posn string `json:"posn"` + Message string `json:"message"` + } + var diagnostics []jsonDiagnostic + for _, f := range diags { + diagnostics = append(diagnostics, jsonDiagnostic{ + Category: f.Category, + Posn: fset.Position(f.Pos).String(), + Message: f.Message, + }) + } + v = diagnostics + } + if v != nil { + m, ok := tree[id] + if !ok { + m = make(map[string]interface{}) + tree[id] = m + } + m[name] = v + } +} + +func (tree JSONTree) Print() { + data, err := json.MarshalIndent(tree, "", "\t") + if err != nil { + log.Panicf("internal error: JSON marshalling failed: %v", err) + } + fmt.Printf("%s\n", data) +} diff --git a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go new file mode 100644 index 00000000000..c5a70f3b7d6 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go @@ -0,0 +1,92 @@ +package analysisflags + +import ( + "flag" + "fmt" + "log" + "os" + "sort" + "strings" + + "golang.org/x/tools/go/analysis" +) + +const help = 
`PROGNAME is a tool for static analysis of Go programs. + +PROGNAME examines Go source code and reports suspicious constructs, +such as Printf calls whose arguments do not align with the format +string. It uses heuristics that do not guarantee all reports are +genuine problems, but it can find errors not caught by the compilers. +` + +// Help implements the help subcommand for a multichecker or unitchecker +// style command. The optional args specify the analyzers to describe. +// Help calls log.Fatal if no such analyzer exists. +func Help(progname string, analyzers []*analysis.Analyzer, args []string) { + // No args: show summary of all analyzers. + if len(args) == 0 { + fmt.Println(strings.Replace(help, "PROGNAME", progname, -1)) + fmt.Println("Registered analyzers:") + fmt.Println() + sort.Slice(analyzers, func(i, j int) bool { + return analyzers[i].Name < analyzers[j].Name + }) + for _, a := range analyzers { + title := strings.Split(a.Doc, "\n\n")[0] + fmt.Printf(" %-12s %s\n", a.Name, title) + } + fmt.Println("\nBy default all analyzers are run.") + fmt.Println("To select specific analyzers, use the -NAME flag for each one,") + fmt.Println(" or -NAME=false to run all analyzers not explicitly disabled.") + + // Show only the core command-line flags. + fmt.Println("\nCore flags:") + fmt.Println() + fs := flag.NewFlagSet("", flag.ExitOnError) + flag.VisitAll(func(f *flag.Flag) { + if !strings.Contains(f.Name, ".") { + fs.Var(f.Value, f.Name, f.Usage) + } + }) + fs.SetOutput(os.Stdout) + fs.PrintDefaults() + + fmt.Printf("\nTo see details and flags of a specific analyzer, run '%s help name'.\n", progname) + + return + } + + // Show help on specific analyzer(s). +outer: + for _, arg := range args { + for _, a := range analyzers { + if a.Name == arg { + paras := strings.Split(a.Doc, "\n\n") + title := paras[0] + fmt.Printf("%s: %s\n", a.Name, title) + + // Show only the flags relating to this analysis, + // properly prefixed. 
+ first := true + fs := flag.NewFlagSet(a.Name, flag.ExitOnError) + a.Flags.VisitAll(func(f *flag.Flag) { + if first { + first = false + fmt.Println("\nAnalyzer flags:") + fmt.Println() + } + fs.Var(f.Value, a.Name+"."+f.Name, f.Usage) + }) + fs.SetOutput(os.Stdout) + fs.PrintDefaults() + + if len(paras) > 1 { + fmt.Printf("\n%s\n", strings.Join(paras[1:], "\n\n")) + } + + continue outer + } + } + log.Fatalf("Analyzer %q not registered", arg) + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go new file mode 100644 index 00000000000..4cade777054 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go @@ -0,0 +1,726 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package checker defines the implementation of the checker commands. +// The same code drives the multi-analysis driver, the single-analysis +// driver that is conventionally provided for convenience along with +// each analysis package, and the test driver. +package checker + +import ( + "bytes" + "encoding/gob" + "flag" + "fmt" + "go/token" + "go/types" + "log" + "os" + "reflect" + "runtime" + "runtime/pprof" + "runtime/trace" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/analysisflags" + "golang.org/x/tools/go/packages" +) + +var ( + // Debug is a set of single-letter flags: + // + // f show [f]acts as they are created + // p disable [p]arallel execution of analyzers + // s do additional [s]anity checks on fact types and serialization + // t show [t]iming info (NB: use 'p' flag to avoid GC/scheduler noise) + // v show [v]erbose logging + // + Debug = "" + + // Log files for optional performance tracing. 
+ CPUProfile, MemProfile, Trace string +) + +// RegisterFlags registers command-line flags used by the analysis driver. +func RegisterFlags() { + // When adding flags here, remember to update + // the list of suppressed flags in analysisflags. + + flag.StringVar(&Debug, "debug", Debug, `debug flags, any subset of "fpstv"`) + + flag.StringVar(&CPUProfile, "cpuprofile", "", "write CPU profile to this file") + flag.StringVar(&MemProfile, "memprofile", "", "write memory profile to this file") + flag.StringVar(&Trace, "trace", "", "write trace log to this file") +} + +// Run loads the packages specified by args using go/packages, +// then applies the specified analyzers to them. +// Analysis flags must already have been set. +// It provides most of the logic for the main functions of both the +// singlechecker and the multi-analysis commands. +// It returns the appropriate exit code. +func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) { + if CPUProfile != "" { + f, err := os.Create(CPUProfile) + if err != nil { + log.Fatal(err) + } + if err := pprof.StartCPUProfile(f); err != nil { + log.Fatal(err) + } + // NB: profile won't be written in case of error. + defer pprof.StopCPUProfile() + } + + if Trace != "" { + f, err := os.Create(Trace) + if err != nil { + log.Fatal(err) + } + if err := trace.Start(f); err != nil { + log.Fatal(err) + } + // NB: trace log won't be written in case of error. + defer func() { + trace.Stop() + log.Printf("To view the trace, run:\n$ go tool trace view %s", Trace) + }() + } + + if MemProfile != "" { + f, err := os.Create(MemProfile) + if err != nil { + log.Fatal(err) + } + // NB: memprofile won't be written in case of error. + defer func() { + runtime.GC() // get up-to-date statistics + if err := pprof.WriteHeapProfile(f); err != nil { + log.Fatalf("Writing memory profile: %v", err) + } + f.Close() + }() + } + + // Load the packages. 
+ if dbg('v') { + log.SetPrefix("") + log.SetFlags(log.Lmicroseconds) // display timing + log.Printf("load %s", args) + } + + // Optimization: if the selected analyzers don't produce/consume + // facts, we need source only for the initial packages. + allSyntax := needFacts(analyzers) + initial, err := load(args, allSyntax) + if err != nil { + log.Print(err) + return 1 // load errors + } + + // Print the results. + roots := analyze(initial, analyzers) + + return printDiagnostics(roots) +} + +// load loads the initial packages. +func load(patterns []string, allSyntax bool) ([]*packages.Package, error) { + mode := packages.LoadSyntax + if allSyntax { + mode = packages.LoadAllSyntax + } + conf := packages.Config{ + Mode: mode, + Tests: true, + } + initial, err := packages.Load(&conf, patterns...) + if err == nil { + if n := packages.PrintErrors(initial); n > 1 { + err = fmt.Errorf("%d errors during loading", n) + } else if n == 1 { + err = fmt.Errorf("error during loading") + } else if len(initial) == 0 { + err = fmt.Errorf("%s matched no packages", strings.Join(patterns, " ")) + } + } + + return initial, err +} + +// TestAnalyzer applies an analysis to a set of packages (and their +// dependencies if necessary) and returns the results. +// +// Facts about pkg are returned in a map keyed by object; package facts +// have a nil key. +// +// This entry point is used only by analysistest. 
+func TestAnalyzer(a *analysis.Analyzer, pkgs []*packages.Package) []*TestAnalyzerResult { + var results []*TestAnalyzerResult + for _, act := range analyze(pkgs, []*analysis.Analyzer{a}) { + facts := make(map[types.Object][]analysis.Fact) + for key, fact := range act.objectFacts { + if key.obj.Pkg() == act.pass.Pkg { + facts[key.obj] = append(facts[key.obj], fact) + } + } + for key, fact := range act.packageFacts { + if key.pkg == act.pass.Pkg { + facts[nil] = append(facts[nil], fact) + } + } + + results = append(results, &TestAnalyzerResult{act.pass, act.diagnostics, facts, act.result, act.err}) + } + return results +} + +type TestAnalyzerResult struct { + Pass *analysis.Pass + Diagnostics []analysis.Diagnostic + Facts map[types.Object][]analysis.Fact + Result interface{} + Err error +} + +func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action { + // Construct the action graph. + if dbg('v') { + log.Printf("building graph of analysis passes") + } + + // Each graph node (action) is one unit of analysis. + // Edges express package-to-package (vertical) dependencies, + // and analysis-to-analysis (horizontal) dependencies. + type key struct { + *analysis.Analyzer + *packages.Package + } + actions := make(map[key]*action) + + var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *action + mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *action { + k := key{a, pkg} + act, ok := actions[k] + if !ok { + act = &action{a: a, pkg: pkg} + + // Add a dependency on each required analyzers. + for _, req := range a.Requires { + act.deps = append(act.deps, mkAction(req, pkg)) + } + + // An analysis that consumes/produces facts + // must run on the package's dependencies too. 
+ if len(a.FactTypes) > 0 { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // for determinism + for _, path := range paths { + dep := mkAction(a, pkg.Imports[path]) + act.deps = append(act.deps, dep) + } + } + + actions[k] = act + } + return act + } + + // Build nodes for initial packages. + var roots []*action + for _, a := range analyzers { + for _, pkg := range pkgs { + root := mkAction(a, pkg) + root.isroot = true + roots = append(roots, root) + } + } + + // Execute the graph in parallel. + execAll(roots) + + return roots +} + +// printDiagnostics prints the diagnostics for the root packages in either +// plain text or JSON format. JSON format also includes errors for any +// dependencies. +// +// It returns the exitcode: in plain mode, 0 for success, 1 for analysis +// errors, and 3 for diagnostics. We avoid 2 since the flag package uses +// it. JSON mode always succeeds at printing errors and diagnostics in a +// structured form to stdout. +func printDiagnostics(roots []*action) (exitcode int) { + // Print the output. + // + // Print diagnostics only for root packages, + // but errors for all packages. + printed := make(map[*action]bool) + var print func(*action) + var visitAll func(actions []*action) + visitAll = func(actions []*action) { + for _, act := range actions { + if !printed[act] { + printed[act] = true + visitAll(act.deps) + print(act) + } + } + } + + if analysisflags.JSON { + // JSON output + tree := make(analysisflags.JSONTree) + print = func(act *action) { + var diags []analysis.Diagnostic + if act.isroot { + diags = act.diagnostics + } + tree.Add(act.pkg.Fset, act.pkg.ID, act.a.Name, diags, act.err) + } + visitAll(roots) + tree.Print() + } else { + // plain text output + + // De-duplicate diagnostics by position (not token.Pos) to + // avoid double-reporting in source files that belong to + // multiple packages, such as foo and foo.test. 
+ type key struct { + token.Position + *analysis.Analyzer + message string + } + seen := make(map[key]bool) + + print = func(act *action) { + if act.err != nil { + fmt.Fprintf(os.Stderr, "%s: %v\n", act.a.Name, act.err) + exitcode = 1 // analysis failed, at least partially + return + } + if act.isroot { + for _, diag := range act.diagnostics { + // We don't display a.Name/f.Category + // as most users don't care. + + posn := act.pkg.Fset.Position(diag.Pos) + k := key{posn, act.a, diag.Message} + if seen[k] { + continue // duplicate + } + seen[k] = true + + analysisflags.PrintPlain(act.pkg.Fset, diag) + } + } + } + visitAll(roots) + + if exitcode == 0 && len(seen) > 0 { + exitcode = 3 // successfuly produced diagnostics + } + } + + // Print timing info. + if dbg('t') { + if !dbg('p') { + log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism") + } + var all []*action + var total time.Duration + for act := range printed { + all = append(all, act) + total += act.duration + } + sort.Slice(all, func(i, j int) bool { + return all[i].duration > all[j].duration + }) + + // Print actions accounting for 90% of the total. + var sum time.Duration + for _, act := range all { + fmt.Fprintf(os.Stderr, "%s\t%s\n", act.duration, act) + sum += act.duration + if sum >= total*9/10 { + break + } + } + } + + return exitcode +} + +// needFacts reports whether any analysis required by the specified set +// needs facts. If so, we must load the entire program from source. +func needFacts(analyzers []*analysis.Analyzer) bool { + seen := make(map[*analysis.Analyzer]bool) + var q []*analysis.Analyzer // for BFS + q = append(q, analyzers...) + for len(q) > 0 { + a := q[0] + q = q[1:] + if !seen[a] { + seen[a] = true + if len(a.FactTypes) > 0 { + return true + } + q = append(q, a.Requires...) + } + } + return false +} + +// An action represents one unit of analysis work: the application of +// one analysis to one package. 
Actions form a DAG, both within a +// package (as different analyzers are applied, either in sequence or +// parallel), and across packages (as dependencies are analyzed). +type action struct { + once sync.Once + a *analysis.Analyzer + pkg *packages.Package + pass *analysis.Pass + isroot bool + deps []*action + objectFacts map[objectFactKey]analysis.Fact + packageFacts map[packageFactKey]analysis.Fact + inputs map[*analysis.Analyzer]interface{} + result interface{} + diagnostics []analysis.Diagnostic + err error + duration time.Duration +} + +type objectFactKey struct { + obj types.Object + typ reflect.Type +} + +type packageFactKey struct { + pkg *types.Package + typ reflect.Type +} + +func (act *action) String() string { + return fmt.Sprintf("%s@%s", act.a, act.pkg) +} + +func execAll(actions []*action) { + sequential := dbg('p') + var wg sync.WaitGroup + for _, act := range actions { + wg.Add(1) + work := func(act *action) { + act.exec() + wg.Done() + } + if sequential { + work(act) + } else { + go work(act) + } + } + wg.Wait() +} + +func (act *action) exec() { act.once.Do(act.execOnce) } + +func (act *action) execOnce() { + // Analyze dependencies. + execAll(act.deps) + + // TODO(adonovan): uncomment this during profiling. + // It won't build pre-go1.11 but conditional compilation + // using build tags isn't warranted. + // + // ctx, task := trace.NewTask(context.Background(), "exec") + // trace.Log(ctx, "pass", act.String()) + // defer task.End() + + // Record time spent in this node but not its dependencies. + // In parallel mode, due to GC/scheduler contention, the + // time is 5x higher than in sequential mode, even with a + // semaphore limiting the number of threads here. + // So use -debug=tp. + if dbg('t') { + t0 := time.Now() + defer func() { act.duration = time.Since(t0) }() + } + + // Report an error if any dependency failed. 
+ var failed []string + for _, dep := range act.deps { + if dep.err != nil { + failed = append(failed, dep.String()) + } + } + if failed != nil { + sort.Strings(failed) + act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", ")) + return + } + + // Plumb the output values of the dependencies + // into the inputs of this action. Also facts. + inputs := make(map[*analysis.Analyzer]interface{}) + act.objectFacts = make(map[objectFactKey]analysis.Fact) + act.packageFacts = make(map[packageFactKey]analysis.Fact) + for _, dep := range act.deps { + if dep.pkg == act.pkg { + // Same package, different analysis (horizontal edge): + // in-memory outputs of prerequisite analyzers + // become inputs to this analysis pass. + inputs[dep.a] = dep.result + + } else if dep.a == act.a { // (always true) + // Same analysis, different package (vertical edge): + // serialized facts produced by prerequisite analysis + // become available to this analysis pass. + inheritFacts(act, dep) + } + } + + // Run the analysis. 
+ pass := &analysis.Pass{ + Analyzer: act.a, + Fset: act.pkg.Fset, + Files: act.pkg.Syntax, + OtherFiles: act.pkg.OtherFiles, + Pkg: act.pkg.Types, + TypesInfo: act.pkg.TypesInfo, + TypesSizes: act.pkg.TypesSizes, + ResultOf: inputs, + Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, + ImportObjectFact: act.importObjectFact, + ExportObjectFact: act.exportObjectFact, + ImportPackageFact: act.importPackageFact, + ExportPackageFact: act.exportPackageFact, + AllObjectFacts: act.allObjectFacts, + AllPackageFacts: act.allPackageFacts, + } + act.pass = pass + + var err error + if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors { + err = fmt.Errorf("analysis skipped due to errors in package") + } else { + act.result, err = pass.Analyzer.Run(pass) + if err == nil { + if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want { + err = fmt.Errorf( + "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v", + pass.Pkg.Path(), pass.Analyzer, got, want) + } + } + } + act.err = err + + // disallow calls after Run + pass.ExportObjectFact = nil + pass.ExportPackageFact = nil +} + +// inheritFacts populates act.facts with +// those it obtains from its dependency, dep. +func inheritFacts(act, dep *action) { + serialize := dbg('s') + + for key, fact := range dep.objectFacts { + // Filter out facts related to objects + // that are irrelevant downstream + // (equivalently: not in the compiler export data). + if !exportedFrom(key.obj, dep.pkg.Types) { + if false { + log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact) + } + continue + } + + // Optionally serialize/deserialize fact + // to verify that it works across address spaces. 
+ if serialize { + encodedFact, err := codeFact(fact) + if err != nil { + log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + } + fact = encodedFact + } + + if false { + log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact) + } + act.objectFacts[key] = fact + } + + for key, fact := range dep.packageFacts { + // TODO: filter out facts that belong to + // packages not mentioned in the export data + // to prevent side channels. + + // Optionally serialize/deserialize fact + // to verify that it works across address spaces + // and is deterministic. + if serialize { + encodedFact, err := codeFact(fact) + if err != nil { + log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + } + fact = encodedFact + } + + if false { + log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact) + } + act.packageFacts[key] = fact + } +} + +// codeFact encodes then decodes a fact, +// just to exercise that logic. +func codeFact(fact analysis.Fact) (analysis.Fact, error) { + // We encode facts one at a time. + // A real modular driver would emit all facts + // into one encoder to improve gob efficiency. + var buf bytes.Buffer + if err := gob.NewEncoder(&buf).Encode(fact); err != nil { + return nil, err + } + + // Encode it twice and assert that we get the same bits. + // This helps detect nondeterministic Gob encoding (e.g. of maps). + var buf2 bytes.Buffer + if err := gob.NewEncoder(&buf2).Encode(fact); err != nil { + return nil, err + } + if !bytes.Equal(buf.Bytes(), buf2.Bytes()) { + return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact) + } + + new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact) + if err := gob.NewDecoder(&buf).Decode(new); err != nil { + return nil, err + } + return new, nil +} + +// exportedFrom reports whether obj may be visible to a package that imports pkg. 
+// This includes not just the exported members of pkg, but also unexported
+// constants, types, fields, and methods, perhaps belonging to other packages,
+// that find their way into the API.
+// This is an overapproximation of the more accurate approach used by
+// gc export data, which walks the type graph, but it's much simpler.
+//
+// TODO(adonovan): do more accurate filtering by walking the type graph.
+func exportedFrom(obj types.Object, pkg *types.Package) bool {
+	switch obj := obj.(type) {
+	case *types.Func:
+		return obj.Exported() && obj.Pkg() == pkg ||
+			obj.Type().(*types.Signature).Recv() != nil
+	case *types.Var:
+		return obj.Exported() && obj.Pkg() == pkg ||
+			obj.IsField()
+	case *types.TypeName, *types.Const:
+		return true
+	}
+	return false // Nil, Builtin, Label, or PkgName
+}
+
+// importObjectFact implements Pass.ImportObjectFact.
+// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
+// importObjectFact copies the fact value to *ptr.
+func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
+	if obj == nil {
+		panic("nil object")
+	}
+	key := objectFactKey{obj, factType(ptr)}
+	if v, ok := act.objectFacts[key]; ok {
+		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
+		return true
+	}
+	return false
+}
+
+// exportObjectFact implements Pass.ExportObjectFact.
+func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) { + if act.pass.ExportObjectFact == nil { + log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact) + } + + if obj.Pkg() != act.pkg.Types { + log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", + act.a, act.pkg, obj, fact) + } + + key := objectFactKey{obj, factType(fact)} + act.objectFacts[key] = fact // clobber any existing entry + if dbg('f') { + objstr := types.ObjectString(obj, (*types.Package).Name) + fmt.Fprintf(os.Stderr, "%s: object %s has fact %s\n", + act.pkg.Fset.Position(obj.Pos()), objstr, fact) + } +} + +// allObjectFacts implements Pass.AllObjectFacts. +func (act *action) allObjectFacts() []analysis.ObjectFact { + facts := make([]analysis.ObjectFact, 0, len(act.objectFacts)) + for k := range act.objectFacts { + facts = append(facts, analysis.ObjectFact{k.obj, act.objectFacts[k]}) + } + return facts +} + +// importPackageFact implements Pass.ImportPackageFact. +// Given a non-nil pointer ptr of type *T, where *T satisfies Fact, +// fact copies the fact value to *ptr. +func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool { + if pkg == nil { + panic("nil package") + } + key := packageFactKey{pkg, factType(ptr)} + if v, ok := act.packageFacts[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// exportPackageFact implements Pass.ExportPackageFact. 
+func (act *action) exportPackageFact(fact analysis.Fact) { + if act.pass.ExportPackageFact == nil { + log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact) + } + + key := packageFactKey{act.pass.Pkg, factType(fact)} + act.packageFacts[key] = fact // clobber any existing entry + if dbg('f') { + fmt.Fprintf(os.Stderr, "%s: package %s has fact %s\n", + act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact) + } +} + +func factType(fact analysis.Fact) reflect.Type { + t := reflect.TypeOf(fact) + if t.Kind() != reflect.Ptr { + log.Fatalf("invalid Fact type: got %T, want pointer", t) + } + return t +} + +// allObjectFacts implements Pass.AllObjectFacts. +func (act *action) allPackageFacts() []analysis.PackageFact { + facts := make([]analysis.PackageFact, 0, len(act.packageFacts)) + for k := range act.packageFacts { + facts = append(facts, analysis.PackageFact{k.pkg, act.packageFacts[k]}) + } + return facts +} + +func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 } diff --git a/vendor/golang.org/x/tools/go/analysis/internal/facts/facts.go b/vendor/golang.org/x/tools/go/analysis/internal/facts/facts.go new file mode 100644 index 00000000000..468f148900f --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/facts/facts.go @@ -0,0 +1,299 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package facts defines a serializable set of analysis.Fact. +// +// It provides a partial implementation of the Fact-related parts of the +// analysis.Pass interface for use in analysis drivers such as "go vet" +// and other build systems. +// +// The serial format is unspecified and may change, so the same version +// of this package must be used for reading and writing serialized facts. 
+// +// The handling of facts in the analysis system parallels the handling +// of type information in the compiler: during compilation of package P, +// the compiler emits an export data file that describes the type of +// every object (named thing) defined in package P, plus every object +// indirectly reachable from one of those objects. Thus the downstream +// compiler of package Q need only load one export data file per direct +// import of Q, and it will learn everything about the API of package P +// and everything it needs to know about the API of P's dependencies. +// +// Similarly, analysis of package P emits a fact set containing facts +// about all objects exported from P, plus additional facts about only +// those objects of P's dependencies that are reachable from the API of +// package P; the downstream analysis of Q need only load one fact set +// per direct import of Q. +// +// The notion of "exportedness" that matters here is that of the +// compiler. According to the language spec, a method pkg.T.f is +// unexported simply because its name starts with lowercase. But the +// compiler must nonethless export f so that downstream compilations can +// accurately ascertain whether pkg.T implements an interface pkg.I +// defined as interface{f()}. Exported thus means "described in export +// data". +// +package facts + +import ( + "bytes" + "encoding/gob" + "fmt" + "go/types" + "io/ioutil" + "log" + "reflect" + "sort" + "sync" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/types/objectpath" +) + +const debug = false + +// A Set is a set of analysis.Facts. +// +// Decode creates a Set of facts by reading from the imports of a given +// package, and Encode writes out the set. Between these operation, +// the Import and Export methods will query and update the set. +// +// All of Set's methods except String are safe to call concurrently. 
+type Set struct { + pkg *types.Package + mu sync.Mutex + m map[key]analysis.Fact +} + +type key struct { + pkg *types.Package + obj types.Object // (object facts only) + t reflect.Type +} + +// ImportObjectFact implements analysis.Pass.ImportObjectFact. +func (s *Set) ImportObjectFact(obj types.Object, ptr analysis.Fact) bool { + if obj == nil { + panic("nil object") + } + key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(ptr)} + s.mu.Lock() + defer s.mu.Unlock() + if v, ok := s.m[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// ExportObjectFact implements analysis.Pass.ExportObjectFact. +func (s *Set) ExportObjectFact(obj types.Object, fact analysis.Fact) { + if obj.Pkg() != s.pkg { + log.Panicf("in package %s: ExportObjectFact(%s, %T): can't set fact on object belonging another package", + s.pkg, obj, fact) + } + key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(fact)} + s.mu.Lock() + s.m[key] = fact // clobber any existing entry + s.mu.Unlock() +} + +// ImportPackageFact implements analysis.Pass.ImportPackageFact. +func (s *Set) ImportPackageFact(pkg *types.Package, ptr analysis.Fact) bool { + if pkg == nil { + panic("nil package") + } + key := key{pkg: pkg, t: reflect.TypeOf(ptr)} + s.mu.Lock() + defer s.mu.Unlock() + if v, ok := s.m[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// ExportPackageFact implements analysis.Pass.ExportPackageFact. +func (s *Set) ExportPackageFact(fact analysis.Fact) { + key := key{pkg: s.pkg, t: reflect.TypeOf(fact)} + s.mu.Lock() + s.m[key] = fact // clobber any existing entry + s.mu.Unlock() +} + +// gobFact is the Gob declaration of a serialized fact. 
+type gobFact struct {
+	PkgPath string          // path of package
+	Object  objectpath.Path // optional path of object relative to package itself
+	Fact    analysis.Fact   // type and value of user-defined Fact
+}
+
+// Decode decodes all the facts relevant to the analysis of package pkg.
+// The read function reads serialized fact data from an external source
+// for one of pkg's direct imports. The empty file is a valid
+// encoding of an empty fact set.
+//
+// It is the caller's responsibility to call gob.Register on all
+// necessary fact types.
+func Decode(pkg *types.Package, read func(packagePath string) ([]byte, error)) (*Set, error) {
+	// Compute the import map for this package.
+	// See the package doc comment.
+	packages := importMap(pkg.Imports())
+
+	// Read facts from imported packages.
+	// Facts may describe indirectly imported packages, or their objects.
+	m := make(map[key]analysis.Fact) // one big bucket
+	for _, imp := range pkg.Imports() {
+		logf := func(format string, args ...interface{}) {
+			if debug {
+				prefix := fmt.Sprintf("in %s, importing %s: ",
+					pkg.Path(), imp.Path())
+				log.Print(prefix, fmt.Sprintf(format, args...))
+			}
+		}
+
+		// Read the gob-encoded facts.
+		data, err := read(imp.Path())
+		if err != nil {
+			return nil, fmt.Errorf("in %s, can't import facts for package %q: %v",
+				pkg.Path(), imp.Path(), err)
+		}
+		if len(data) == 0 {
+			continue // no facts
+		}
+		var gobFacts []gobFact
+		if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&gobFacts); err != nil {
+			return nil, fmt.Errorf("decoding facts for %q: %v", imp.Path(), err)
+		}
+		if debug {
+			logf("decoded %d facts: %v", len(gobFacts), gobFacts)
+		}
+
+		// Parse each one into a key and a Fact.
+		for _, f := range gobFacts {
+			factPkg := packages[f.PkgPath]
+			if factPkg == nil {
+				// Fact relates to a dependency that was
+				// unused in this translation unit. Skip.
+ logf("no package %q; discarding %v", f.PkgPath, f.Fact) + continue + } + key := key{pkg: factPkg, t: reflect.TypeOf(f.Fact)} + if f.Object != "" { + // object fact + obj, err := objectpath.Object(factPkg, f.Object) + if err != nil { + // (most likely due to unexported object) + // TODO(adonovan): audit for other possibilities. + logf("no object for path: %v; discarding %s", err, f.Fact) + continue + } + key.obj = obj + logf("read %T fact %s for %v", f.Fact, f.Fact, key.obj) + } else { + // package fact + logf("read %T fact %s for %v", f.Fact, f.Fact, factPkg) + } + m[key] = f.Fact + } + } + + return &Set{pkg: pkg, m: m}, nil +} + +// Encode encodes a set of facts to a memory buffer. +// +// It may fail if one of the Facts could not be gob-encoded, but this is +// a sign of a bug in an Analyzer. +func (s *Set) Encode() []byte { + + // TODO(adonovan): opt: use a more efficient encoding + // that avoids repeating PkgPath for each fact. + + // Gather all facts, including those from imported packages. + var gobFacts []gobFact + + s.mu.Lock() + for k, fact := range s.m { + if debug { + log.Printf("%v => %s\n", k, fact) + } + var object objectpath.Path + if k.obj != nil { + path, err := objectpath.For(k.obj) + if err != nil { + if debug { + log.Printf("discarding fact %s about %s\n", fact, k.obj) + } + continue // object not accessible from package API; discard fact + } + object = path + } + gobFacts = append(gobFacts, gobFact{ + PkgPath: k.pkg.Path(), + Object: object, + Fact: fact, + }) + } + s.mu.Unlock() + + // Sort facts by (package, object, type) for determinism. 
+ sort.Slice(gobFacts, func(i, j int) bool { + x, y := gobFacts[i], gobFacts[j] + if x.PkgPath != y.PkgPath { + return x.PkgPath < y.PkgPath + } + if x.Object != y.Object { + return x.Object < y.Object + } + tx := reflect.TypeOf(x.Fact) + ty := reflect.TypeOf(y.Fact) + if tx != ty { + return tx.String() < ty.String() + } + return false // equal + }) + + var buf bytes.Buffer + if len(gobFacts) > 0 { + if err := gob.NewEncoder(&buf).Encode(gobFacts); err != nil { + // Fact encoding should never fail. Identify the culprit. + for _, gf := range gobFacts { + if err := gob.NewEncoder(ioutil.Discard).Encode(gf); err != nil { + fact := gf.Fact + pkgpath := reflect.TypeOf(fact).Elem().PkgPath() + log.Panicf("internal error: gob encoding of analysis fact %s failed: %v; please report a bug against fact %T in package %q", + fact, err, fact, pkgpath) + } + } + } + } + + if debug { + log.Printf("package %q: encode %d facts, %d bytes\n", + s.pkg.Path(), len(gobFacts), buf.Len()) + } + + return buf.Bytes() +} + +// String is provided only for debugging, and must not be called +// concurrent with any Import/Export method. +func (s *Set) String() string { + var buf bytes.Buffer + buf.WriteString("{") + for k, f := range s.m { + if buf.Len() > 1 { + buf.WriteString(", ") + } + if k.obj != nil { + buf.WriteString(k.obj.String()) + } else { + buf.WriteString(k.pkg.Path()) + } + fmt.Fprintf(&buf, ": %v", f) + } + buf.WriteString("}") + return buf.String() +} diff --git a/vendor/golang.org/x/tools/go/analysis/internal/facts/imports.go b/vendor/golang.org/x/tools/go/analysis/internal/facts/imports.go new file mode 100644 index 00000000000..34740f48e04 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/facts/imports.go @@ -0,0 +1,88 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package facts + +import "go/types" + +// importMap computes the import map for a package by traversing the +// entire exported API each of its imports. +// +// This is a workaround for the fact that we cannot access the map used +// internally by the types.Importer returned by go/importer. The entries +// in this map are the packages and objects that may be relevant to the +// current analysis unit. +// +// Packages in the map that are only indirectly imported may be +// incomplete (!pkg.Complete()). +// +func importMap(imports []*types.Package) map[string]*types.Package { + objects := make(map[types.Object]bool) + packages := make(map[string]*types.Package) + + var addObj func(obj types.Object) bool + var addType func(T types.Type) + + addObj = func(obj types.Object) bool { + if !objects[obj] { + objects[obj] = true + addType(obj.Type()) + if pkg := obj.Pkg(); pkg != nil { + packages[pkg.Path()] = pkg + } + return true + } + return false + } + + addType = func(T types.Type) { + switch T := T.(type) { + case *types.Basic: + // nop + case *types.Named: + if addObj(T.Obj()) { + for i := 0; i < T.NumMethods(); i++ { + addObj(T.Method(i)) + } + } + case *types.Pointer: + addType(T.Elem()) + case *types.Slice: + addType(T.Elem()) + case *types.Array: + addType(T.Elem()) + case *types.Chan: + addType(T.Elem()) + case *types.Map: + addType(T.Key()) + addType(T.Elem()) + case *types.Signature: + addType(T.Params()) + addType(T.Results()) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + addObj(T.Field(i)) + } + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + addObj(T.At(i)) + } + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + addObj(T.Method(i)) + } + } + } + + for _, imp := range imports { + packages[imp.Path()] = imp + + scope := imp.Scope() + for _, name := range scope.Names() { + addObj(scope.Lookup(name)) + } + } + + return packages +} diff --git a/vendor/golang.org/x/tools/go/analysis/multichecker/multichecker.go 
b/vendor/golang.org/x/tools/go/analysis/multichecker/multichecker.go new file mode 100644 index 00000000000..3c62be58189 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/multichecker/multichecker.go @@ -0,0 +1,60 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package multichecker defines the main function for an analysis driver +// with several analyzers. This package makes it easy for anyone to build +// an analysis tool containing just the analyzers they need. +package multichecker + +import ( + "flag" + "fmt" + "log" + "os" + "path/filepath" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/analysisflags" + "golang.org/x/tools/go/analysis/internal/checker" + "golang.org/x/tools/go/analysis/unitchecker" +) + +func Main(analyzers ...*analysis.Analyzer) { + progname := filepath.Base(os.Args[0]) + log.SetFlags(0) + log.SetPrefix(progname + ": ") // e.g. "vet: " + + if err := analysis.Validate(analyzers); err != nil { + log.Fatal(err) + } + + checker.RegisterFlags() + + analyzers = analysisflags.Parse(analyzers, true) + + args := flag.Args() + if len(args) == 0 { + fmt.Fprintf(os.Stderr, `%[1]s is a tool for static analysis of Go programs. + +Usage: %[1]s [-flag] [package] + +Run '%[1]s help' for more detail, + or '%[1]s help name' for details and flags of a specific analyzer. 
+`, progname) + os.Exit(1) + } + + if args[0] == "help" { + analysisflags.Help(progname, analyzers, args[1:]) + os.Exit(0) + } + + if len(args) == 1 && strings.HasSuffix(args[0], ".cfg") { + unitchecker.Run(args[0], analyzers) + panic("unreachable") + } + + os.Exit(checker.Run(args, analyzers)) +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go index 6403d7783a2..d41c4e97e32 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go @@ -130,7 +130,7 @@ var ( asmPlusBuild = re(`//\s+\+build\s+([^\n]+)`) asmTEXT = re(`\bTEXT\b(.*)·([^\(]+)\(SB\)(?:\s*,\s*([0-9A-Z|+()]+))?(?:\s*,\s*\$(-?[0-9]+)(?:-([0-9]+))?)?`) asmDATA = re(`\b(DATA|GLOBL)\b`) - asmNamedFP = re(`([a-zA-Z0-9_\xFF-\x{10FFFF}]+)(?:\+([0-9]+))\(FP\)`) + asmNamedFP = re(`\$?([a-zA-Z0-9_\xFF-\x{10FFFF}]+)(?:\+([0-9]+))\(FP\)`) asmUnnamedFP = re(`[^+\-0-9](([0-9]+)\(FP\))`) asmSP = re(`[^+\-0-9](([0-9]+)\(([A-Z0-9]+)\))`) asmOpcode = re(`^\s*(?:[A-Z0-9a-z_]+:)?\s*([A-Z]+)\s*([^,]*)(?:,\s*(.*))?`) @@ -184,6 +184,7 @@ Files: fnName string localSize, argSize int wroteSP bool + noframe bool haveRetArg bool retLine []int ) @@ -231,6 +232,11 @@ Files: } } + // Ignore comments and commented-out code. + if i := strings.Index(line, "//"); i >= 0 { + line = line[:i] + } + if m := asmTEXT.FindStringSubmatch(line); m != nil { flushRet() if arch == "" { @@ -254,7 +260,7 @@ Files: // identifiers to represent the directory separator. 
pkgPath = strings.Replace(pkgPath, "∕", "/", -1) if pkgPath != pass.Pkg.Path() { - log.Printf("%s:%d: [%s] cannot check cross-package assembly function: %s is in package %s", fname, lineno, arch, fnName, pkgPath) + // log.Printf("%s:%d: [%s] cannot check cross-package assembly function: %s is in package %s", fname, lineno, arch, fnName, pkgPath) fn = nil fnName = "" continue @@ -275,7 +281,8 @@ Files: localSize += archDef.intSize } argSize, _ = strconv.Atoi(m[5]) - if fn == nil && !strings.Contains(fnName, "<>") { + noframe = strings.Contains(flag, "NOFRAME") + if fn == nil && !strings.Contains(fnName, "<>") && !noframe { badf("function %s missing Go declaration", fnName) } wroteSP = false @@ -305,13 +312,18 @@ Files: continue } - if strings.Contains(line, ", "+archDef.stack) || strings.Contains(line, ",\t"+archDef.stack) { + if strings.Contains(line, ", "+archDef.stack) || strings.Contains(line, ",\t"+archDef.stack) || strings.Contains(line, "NOP "+archDef.stack) || strings.Contains(line, "NOP\t"+archDef.stack) { wroteSP = true continue } + if arch == "wasm" && strings.Contains(line, "CallImport") { + // CallImport is a call out to magic that can write the result. + haveRetArg = true + } + for _, m := range asmSP.FindAllStringSubmatch(line, -1) { - if m[3] != archDef.stack || wroteSP { + if m[3] != archDef.stack || wroteSP || noframe { continue } off := 0 @@ -371,7 +383,7 @@ Files: } continue } - asmCheckVar(badf, fn, line, m[0], off, v) + asmCheckVar(badf, fn, line, m[0], off, v, archDef) } } flushRet() @@ -589,7 +601,7 @@ func asmParseDecl(pass *analysis.Pass, decl *ast.FuncDecl) map[string]*asmFunc { } // asmCheckVar checks a single variable reference. 
-func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr string, off int, v *asmVar) { +func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr string, off int, v *asmVar, archDef *asmArch) { m := asmOpcode.FindStringSubmatch(line) if m == nil { if !strings.HasPrefix(strings.TrimSpace(line), "//") { @@ -598,6 +610,8 @@ func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr stri return } + addr := strings.HasPrefix(expr, "$") + // Determine operand sizes from instruction. // Typically the suffix suffices, but there are exceptions. var src, dst, kind asmKind @@ -617,10 +631,13 @@ func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr stri // They just take the address of it. case "386.LEAL": dst = 4 + addr = true case "amd64.LEAQ": dst = 8 + addr = true case "amd64p32.LEAL": dst = 4 + addr = true default: switch fn.arch.name { case "386", "amd64": @@ -725,6 +742,11 @@ func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr stri vs = v.inner[0].size vt = v.inner[0].typ } + if addr { + vk = asmKind(archDef.ptrSize) + vs = archDef.ptrSize + vt = "address" + } if off != v.off { var inner bytes.Buffer diff --git a/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go b/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go index b5161836a57..e88cf57d8f7 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go @@ -45,6 +45,8 @@ var contextPackage = "context" // control-flow path from the call to a return statement and that path // does not "use" the cancel function. Any reference to the variable // counts as a use, even within a nested function literal. +// If the variable's scope is larger than the function +// containing the assignment, we assume that other uses exist. // // checkLostCancel analyzes a single named or literal function. 
func run(pass *analysis.Pass) (interface{}, error) { @@ -66,6 +68,15 @@ func run(pass *analysis.Pass) (interface{}, error) { } func runFunc(pass *analysis.Pass, node ast.Node) { + // Find scope of function node + var funcScope *types.Scope + switch v := node.(type) { + case *ast.FuncLit: + funcScope = pass.TypesInfo.Scopes[v.Type] + case *ast.FuncDecl: + funcScope = pass.TypesInfo.Scopes[v.Type] + } + // Maps each cancel variable to its defining ValueSpec/AssignStmt. cancelvars := make(map[*types.Var]ast.Node) @@ -114,7 +125,11 @@ func runFunc(pass *analysis.Pass, node ast.Node) { "the cancel function returned by context.%s should be called, not discarded, to avoid a context leak", n.(*ast.SelectorExpr).Sel.Name) } else if v, ok := pass.TypesInfo.Uses[id].(*types.Var); ok { - cancelvars[v] = stmt + // If the cancel variable is defined outside function scope, + // do not analyze it. + if funcScope.Contains(v.Pos()) { + cancelvars[v] = stmt + } } else if v, ok := pass.TypesInfo.Defs[id].(*types.Var); ok { cancelvars[v] = stmt } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go index d4697eac0c4..f59e95dc219 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go @@ -856,20 +856,28 @@ func recursiveStringer(pass *analysis.Pass, e ast.Expr) bool { return false } - // Is it the receiver r, or &r? - recv := stringMethod.Type().(*types.Signature).Recv() - if recv == nil { + sig := stringMethod.Type().(*types.Signature) + if !isStringer(sig) { return false } + + // Is it the receiver r, or &r? 
if u, ok := e.(*ast.UnaryExpr); ok && u.Op == token.AND { e = u.X // strip off & from &r } if id, ok := e.(*ast.Ident); ok { - return pass.TypesInfo.Uses[id] == recv + return pass.TypesInfo.Uses[id] == sig.Recv() } return false } +// isStringer reports whether the method signature matches the String() definition in fmt.Stringer. +func isStringer(sig *types.Signature) bool { + return sig.Params().Len() == 0 && + sig.Results().Len() == 1 && + sig.Results().At(0).Type() == types.Typ[types.String] +} + // isFunctionValue reports whether the expression is a function as opposed to a function call. // It is almost always a mistake to print a function value. func isFunctionValue(pass *analysis.Pass, e ast.Expr) bool { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go b/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go index 83495112243..bc1db7e4c2e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go @@ -8,7 +8,6 @@ package stdmethods import ( "go/ast" - "go/token" "go/types" "strings" @@ -117,6 +116,13 @@ func canonicalMethod(pass *analysis.Pass, id *ast.Ident) { args := sign.Params() results := sign.Results() + // Special case: WriteTo with more than one argument, + // not trying at all to implement io.WriterTo, + // comes up often enough to skip. + if id.Name == "WriteTo" && args.Len() > 1 { + return + } + // Do the =s (if any) all match? 
if !matchParams(pass, expect.args, args, "=") || !matchParams(pass, expect.results, results, "=") { return @@ -163,7 +169,7 @@ func matchParams(pass *analysis.Pass, expect []string, actual *types.Tuple, pref if i >= actual.Len() { return false } - if !matchParamType(pass.Fset, pass.Pkg, x, actual.At(i).Type()) { + if !matchParamType(x, actual.At(i).Type()) { return false } } @@ -174,13 +180,8 @@ func matchParams(pass *analysis.Pass, expect []string, actual *types.Tuple, pref } // Does this one type match? -func matchParamType(fset *token.FileSet, pkg *types.Package, expect string, actual types.Type) bool { +func matchParamType(expect string, actual types.Type) bool { expect = strings.TrimPrefix(expect, "=") - // Strip package name if we're in that package. - if n := len(pkg.Name()); len(expect) > n && expect[:n] == pkg.Name() && expect[n] == '.' { - expect = expect[n+1:] - } - // Overkill but easy. return typeString(actual) == expect } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go index 2b67c376bab..bcdb0429200 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go @@ -56,6 +56,13 @@ var checkTagSpaces = map[string]bool{"json": true, "xml": true, "asn1": true} // checkCanonicalFieldTag checks a single struct field tag. func checkCanonicalFieldTag(pass *analysis.Pass, field *types.Var, tag string, seen *map[[2]string]token.Pos) { + switch pass.Pkg.Path() { + case "encoding/json", "encoding/xml": + // These packages know how to use their own APIs. + // Sometimes they are testing what happens to incorrect programs. 
+ return + } + for _, key := range checkTagDups { checkTagDuplicates(pass, tag, key, field, field, seen) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go b/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go index 35b0a3e7cc2..5dd060800cc 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go @@ -84,23 +84,25 @@ func isTestParam(typ ast.Expr, wantType string) bool { return false } -func lookup(pkg *types.Package, name string) types.Object { +func lookup(pkg *types.Package, name string) []types.Object { if o := pkg.Scope().Lookup(name); o != nil { - return o - } - - // If this package is ".../foo_test" and it imports a package - // ".../foo", try looking in the latter package. - // This heuristic should work even on build systems that do not - // record any special link between the packages. - if basePath := strings.TrimSuffix(pkg.Path(), "_test"); basePath != pkg.Path() { - for _, imp := range pkg.Imports() { - if imp.Path() == basePath { - return imp.Scope().Lookup(name) - } + return []types.Object{o} + } + + var ret []types.Object + // Search through the imports to see if any of them define name. + // It's hard to tell in general which package is being tested, so + // for the purposes of the analysis, allow the object to appear + // in any of the imports. This guarantees there are no false positives + // because the example needs to use the object so it must be defined + // in the package or one if its imports. On the other hand, false + // negatives are possible, but should be rare. 
+ for _, imp := range pkg.Imports() { + if obj := imp.Scope().Lookup(name); obj != nil { + ret = append(ret, obj) } } - return nil + return ret } func checkExample(pass *analysis.Pass, fn *ast.FuncDecl) { @@ -121,9 +123,9 @@ func checkExample(pass *analysis.Pass, fn *ast.FuncDecl) { exName = strings.TrimPrefix(fnName, "Example") elems = strings.SplitN(exName, "_", 3) ident = elems[0] - obj = lookup(pass.Pkg, ident) + objs = lookup(pass.Pkg, ident) ) - if ident != "" && obj == nil { + if ident != "" && len(objs) == 0 { // Check ExampleFoo and ExampleBadFoo. pass.Reportf(fn.Pos(), "%s refers to unknown identifier: %s", fnName, ident) // Abort since obj is absent and no subsequent checks can be performed. @@ -145,7 +147,15 @@ func checkExample(pass *analysis.Pass, fn *ast.FuncDecl) { mmbr := elems[1] if !isExampleSuffix(mmbr) { // Check ExampleFoo_Method and ExampleFoo_BadMethod. - if obj, _, _ := types.LookupFieldOrMethod(obj.Type(), true, obj.Pkg(), mmbr); obj == nil { + found := false + // Check if Foo.Method exists in this package or its imports. + for _, obj := range objs { + if obj, _, _ := types.LookupFieldOrMethod(obj.Type(), true, obj.Pkg(), mmbr); obj != nil { + found = true + break + } + } + if !found { pass.Reportf(fn.Pos(), "%s refers to unknown field or method: %s.%s", fnName, ident, mmbr) } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go index 6cf4358ab9a..d019ecef15a 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go @@ -29,6 +29,13 @@ var Analyzer = &analysis.Analyzer{ } func run(pass *analysis.Pass) (interface{}, error) { + switch pass.Pkg.Path() { + case "encoding/gob", "encoding/json", "encoding/xml": + // These packages know how to use their own APIs. + // Sometimes they are testing what happens to incorrect programs. 
+ return nil, nil + } + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ diff --git a/vendor/golang.org/x/tools/go/analysis/unitchecker/main.go b/vendor/golang.org/x/tools/go/analysis/unitchecker/main.go new file mode 100644 index 00000000000..844e8f3dac2 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/unitchecker/main.go @@ -0,0 +1,64 @@ +// +build ignore + +// This file provides an example command for static checkers +// conforming to the golang.org/x/tools/go/analysis API. +// It serves as a model for the behavior of the cmd/vet tool in $GOROOT. +// Being based on the unitchecker driver, it must be run by go vet: +// +// $ go build -o unitchecker main.go +// $ go vet -vettool=unitchecker my/project/... +// +// For a checker also capable of running standalone, use multichecker. +package main + +import ( + "golang.org/x/tools/go/analysis/unitchecker" + + "golang.org/x/tools/go/analysis/passes/asmdecl" + "golang.org/x/tools/go/analysis/passes/assign" + "golang.org/x/tools/go/analysis/passes/atomic" + "golang.org/x/tools/go/analysis/passes/bools" + "golang.org/x/tools/go/analysis/passes/buildtag" + "golang.org/x/tools/go/analysis/passes/cgocall" + "golang.org/x/tools/go/analysis/passes/composite" + "golang.org/x/tools/go/analysis/passes/copylock" + "golang.org/x/tools/go/analysis/passes/httpresponse" + "golang.org/x/tools/go/analysis/passes/loopclosure" + "golang.org/x/tools/go/analysis/passes/lostcancel" + "golang.org/x/tools/go/analysis/passes/nilfunc" + "golang.org/x/tools/go/analysis/passes/printf" + "golang.org/x/tools/go/analysis/passes/shift" + "golang.org/x/tools/go/analysis/passes/stdmethods" + "golang.org/x/tools/go/analysis/passes/structtag" + "golang.org/x/tools/go/analysis/passes/tests" + "golang.org/x/tools/go/analysis/passes/unmarshal" + "golang.org/x/tools/go/analysis/passes/unreachable" + "golang.org/x/tools/go/analysis/passes/unsafeptr" + "golang.org/x/tools/go/analysis/passes/unusedresult" +) + 
+func main() { + unitchecker.Main( + asmdecl.Analyzer, + assign.Analyzer, + atomic.Analyzer, + bools.Analyzer, + buildtag.Analyzer, + cgocall.Analyzer, + composite.Analyzer, + copylock.Analyzer, + httpresponse.Analyzer, + loopclosure.Analyzer, + lostcancel.Analyzer, + nilfunc.Analyzer, + printf.Analyzer, + shift.Analyzer, + stdmethods.Analyzer, + structtag.Analyzer, + tests.Analyzer, + unmarshal.Analyzer, + unreachable.Analyzer, + unsafeptr.Analyzer, + unusedresult.Analyzer, + ) +} diff --git a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go new file mode 100644 index 00000000000..ba2e66fed2f --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go @@ -0,0 +1,388 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The unitchecker package defines the main function for an analysis +// driver that analyzes a single compilation unit during a build. +// It is invoked by a build system such as "go vet": +// +// $ go vet -vettool=$(which vet) +// +// It supports the following command-line protocol: +// +// -V=full describe executable (to the build tool) +// -flags describe flags (to the build tool) +// foo.cfg description of compilation unit (from the build tool) +// +// This package does not depend on go/packages. +// If you need a standalone tool, use multichecker, +// which supports this mode but can also load packages +// from source using go/packages. +package unitchecker + +// TODO(adonovan): +// - with gccgo, go build does not build standard library, +// so we will not get to analyze it. Yet we must in order +// to create base facts for, say, the fmt package for the +// printf checker. 
+ +import ( + "encoding/gob" + "encoding/json" + "flag" + "fmt" + "go/ast" + "go/build" + "go/importer" + "go/parser" + "go/token" + "go/types" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/analysisflags" + "golang.org/x/tools/go/analysis/internal/facts" +) + +// A Config describes a compilation unit to be analyzed. +// It is provided to the tool in a JSON-encoded file +// whose name ends with ".cfg". +type Config struct { + ID string // e.g. "fmt [fmt.test]" + Compiler string + Dir string + ImportPath string + GoFiles []string + NonGoFiles []string + ImportMap map[string]string + PackageFile map[string]string + Standard map[string]bool + PackageVetx map[string]string + VetxOnly bool + VetxOutput string + SucceedOnTypecheckFailure bool +} + +// Main is the main function of a vet-like analysis tool that must be +// invoked by a build system to analyze a single package. +// +// The protocol required by 'go vet -vettool=...' is that the tool must support: +// +// -flags describe flags in JSON +// -V=full describe executable for build caching +// foo.cfg perform separate modular analyze on the single +// unit described by a JSON config file foo.cfg. +// +func Main(analyzers ...*analysis.Analyzer) { + progname := filepath.Base(os.Args[0]) + log.SetFlags(0) + log.SetPrefix(progname + ": ") + + if err := analysis.Validate(analyzers); err != nil { + log.Fatal(err) + } + + flag.Usage = func() { + fmt.Fprintf(os.Stderr, `%[1]s is a tool for static analysis of Go programs. 
+ +Usage of %[1]s: + %.16[1]s unit.cfg # execute analysis specified by config file + %.16[1]s help # general help + %.16[1]s help name # help on specific analyzer and its flags +`, progname) + os.Exit(1) + } + + analyzers = analysisflags.Parse(analyzers, true) + + args := flag.Args() + if len(args) == 0 { + flag.Usage() + } + if args[0] == "help" { + analysisflags.Help(progname, analyzers, args[1:]) + os.Exit(0) + } + if len(args) != 1 || !strings.HasSuffix(args[0], ".cfg") { + log.Fatalf(`invoking "go tool vet" directly is unsupported; use "go vet"`) + } + Run(args[0], analyzers) +} + +// Run reads the *.cfg file, runs the analysis, +// and calls os.Exit with an appropriate error code. +// It assumes flags have already been set. +func Run(configFile string, analyzers []*analysis.Analyzer) { + cfg, err := readConfig(configFile) + if err != nil { + log.Fatal(err) + } + + fset := token.NewFileSet() + results, err := run(fset, cfg, analyzers) + if err != nil { + log.Fatal(err) + } + + // In VetxOnly mode, the analysis is run only for facts. + if !cfg.VetxOnly { + if analysisflags.JSON { + // JSON output + tree := make(analysisflags.JSONTree) + for _, res := range results { + tree.Add(fset, cfg.ID, res.a.Name, res.diagnostics, res.err) + } + tree.Print() + } else { + // plain text + exit := 0 + for _, res := range results { + if res.err != nil { + log.Println(res.err) + exit = 1 + } + } + for _, res := range results { + for _, diag := range res.diagnostics { + analysisflags.PrintPlain(fset, diag) + exit = 1 + } + } + os.Exit(exit) + } + } + + os.Exit(0) +} + +func readConfig(filename string) (*Config, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + cfg := new(Config) + if err := json.Unmarshal(data, cfg); err != nil { + return nil, fmt.Errorf("cannot decode JSON config file %s: %v", filename, err) + } + if len(cfg.GoFiles) == 0 { + // The go command disallows packages with no files. 
+ // The only exception is unsafe, but the go command + // doesn't call vet on it. + return nil, fmt.Errorf("package has no files: %s", cfg.ImportPath) + } + return cfg, nil +} + +var importerForCompiler = func(_ *token.FileSet, compiler string, lookup importer.Lookup) types.Importer { + // broken legacy implementation (https://golang.org/issue/28995) + return importer.For(compiler, lookup) +} + +func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]result, error) { + // Load, parse, typecheck. + var files []*ast.File + for _, name := range cfg.GoFiles { + f, err := parser.ParseFile(fset, name, nil, parser.ParseComments) + if err != nil { + if cfg.SucceedOnTypecheckFailure { + // Silently succeed; let the compiler + // report parse errors. + err = nil + } + return nil, err + } + files = append(files, f) + } + compilerImporter := importerForCompiler(fset, cfg.Compiler, func(path string) (io.ReadCloser, error) { + // path is a resolved package path, not an import path. + file, ok := cfg.PackageFile[path] + if !ok { + if cfg.Compiler == "gccgo" && cfg.Standard[path] { + return nil, nil // fall back to default gccgo lookup + } + return nil, fmt.Errorf("no package file for %q", path) + } + return os.Open(file) + }) + importer := importerFunc(func(importPath string) (*types.Package, error) { + path, ok := cfg.ImportMap[importPath] // resolve vendoring, etc + if !ok { + return nil, fmt.Errorf("can't resolve import %q", path) + } + return compilerImporter.Import(path) + }) + tc := &types.Config{ + Importer: importer, + Sizes: types.SizesFor("gc", build.Default.GOARCH), // assume gccgo ≡ gc? 
+ } + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + pkg, err := tc.Check(cfg.ImportPath, fset, files, info) + if err != nil { + if cfg.SucceedOnTypecheckFailure { + // Silently succeed; let the compiler + // report type errors. + err = nil + } + return nil, err + } + + // Register fact types with gob. + // In VetxOnly mode, analyzers are only for their facts, + // so we can skip any analysis that neither produces facts + // nor depends on any analysis that produces facts. + // Also build a map to hold working state and result. + type action struct { + once sync.Once + result interface{} + err error + usesFacts bool // (transitively uses) + diagnostics []analysis.Diagnostic + } + actions := make(map[*analysis.Analyzer]*action) + var registerFacts func(a *analysis.Analyzer) bool + registerFacts = func(a *analysis.Analyzer) bool { + act, ok := actions[a] + if !ok { + act = new(action) + var usesFacts bool + for _, f := range a.FactTypes { + usesFacts = true + gob.Register(f) + } + for _, req := range a.Requires { + if registerFacts(req) { + usesFacts = true + } + } + act.usesFacts = usesFacts + actions[a] = act + } + return act.usesFacts + } + var filtered []*analysis.Analyzer + for _, a := range analyzers { + if registerFacts(a) || !cfg.VetxOnly { + filtered = append(filtered, a) + } + } + analyzers = filtered + + // Read facts from imported packages. + read := func(path string) ([]byte, error) { + if vetx, ok := cfg.PackageVetx[path]; ok { + return ioutil.ReadFile(vetx) + } + return nil, nil // no .vetx file, no facts + } + facts, err := facts.Decode(pkg, read) + if err != nil { + return nil, err + } + + // In parallel, execute the DAG of analyzers. 
+ var exec func(a *analysis.Analyzer) *action + var execAll func(analyzers []*analysis.Analyzer) + exec = func(a *analysis.Analyzer) *action { + act := actions[a] + act.once.Do(func() { + execAll(a.Requires) // prefetch dependencies in parallel + + // The inputs to this analysis are the + // results of its prerequisites. + inputs := make(map[*analysis.Analyzer]interface{}) + var failed []string + for _, req := range a.Requires { + reqact := exec(req) + if reqact.err != nil { + failed = append(failed, req.String()) + continue + } + inputs[req] = reqact.result + } + + // Report an error if any dependency failed. + if failed != nil { + sort.Strings(failed) + act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", ")) + return + } + + pass := &analysis.Pass{ + Analyzer: a, + Fset: fset, + Files: files, + OtherFiles: cfg.NonGoFiles, + Pkg: pkg, + TypesInfo: info, + TypesSizes: tc.Sizes, + ResultOf: inputs, + Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, + ImportObjectFact: facts.ImportObjectFact, + ExportObjectFact: facts.ExportObjectFact, + ImportPackageFact: facts.ImportPackageFact, + ExportPackageFact: facts.ExportPackageFact, + } + + t0 := time.Now() + act.result, act.err = a.Run(pass) + if false { + log.Printf("analysis %s = %s", pass, time.Since(t0)) + } + }) + return act + } + execAll = func(analyzers []*analysis.Analyzer) { + var wg sync.WaitGroup + for _, a := range analyzers { + wg.Add(1) + go func(a *analysis.Analyzer) { + _ = exec(a) + wg.Done() + }(a) + } + wg.Wait() + } + + execAll(analyzers) + + // Return diagnostics and errors from root analyzers. 
+ results := make([]result, len(analyzers)) + for i, a := range analyzers { + act := actions[a] + results[i].a = a + results[i].err = act.err + results[i].diagnostics = act.diagnostics + } + + data := facts.Encode() + if err := ioutil.WriteFile(cfg.VetxOutput, data, 0666); err != nil { + return nil, fmt.Errorf("failed to write analysis facts: %v", err) + } + + return results, nil +} + +type result struct { + a *analysis.Analyzer + diagnostics []analysis.Diagnostic + err error +} + +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } diff --git a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker112.go b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker112.go new file mode 100644 index 00000000000..683b7e91d25 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker112.go @@ -0,0 +1,9 @@ +// +build go1.12 + +package unitchecker + +import "go/importer" + +func init() { + importerForCompiler = importer.ForCompiler +} diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 860c3ec156d..22ff769ef2b 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -18,7 +18,7 @@ import ( // Driver type driverRequest struct { - Command string `json "command"` + Command string `json:"command"` Mode LoadMode `json:"mode"` Env []string `json:"env"` BuildFlags []string `json:"build_flags"` diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 067b008d3c2..3a0d4b01236 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -78,7 +78,7 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { var sizes types.Sizes var sizeserr error var sizeswg sync.WaitGroup - if cfg.Mode 
>= LoadTypes { + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { sizeswg.Add(1) go func() { sizes, sizeserr = getSizes(cfg) @@ -121,20 +121,6 @@ extractQueries: } } - // TODO(matloob): Remove the definition of listfunc and just use golistPackages once go1.12 is released. - var listfunc driver - var isFallback bool - listfunc = func(cfg *Config, words ...string) (*driverResponse, error) { - response, err := golistDriverCurrent(cfg, words...) - if _, ok := err.(goTooOldError); ok { - isFallback = true - listfunc = golistDriverFallback - return listfunc(cfg, words...) - } - listfunc = golistDriverCurrent - return response, err - } - response := &responseDeduper{} var err error @@ -142,7 +128,7 @@ extractQueries: // patterns also requires a go list call, since it's the equivalent of // ".". if len(restPatterns) > 0 || len(patterns) == 0 { - dr, err := listfunc(cfg, restPatterns...) + dr, err := golistDriver(cfg, restPatterns...) if err != nil { return nil, err } @@ -161,13 +147,13 @@ extractQueries: var containsCandidates []string if len(containFiles) != 0 { - if err := runContainsQueries(cfg, listfunc, isFallback, response, containFiles); err != nil { + if err := runContainsQueries(cfg, golistDriver, response, containFiles); err != nil { return nil, err } } if len(packagesNamed) != 0 { - if err := runNamedQueries(cfg, listfunc, response, packagesNamed); err != nil { + if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil { return nil, err } } @@ -180,12 +166,8 @@ extractQueries: containsCandidates = append(containsCandidates, modifiedPkgs...) containsCandidates = append(containsCandidates, needPkgs...) } - - if len(needPkgs) > 0 { - addNeededOverlayPackages(cfg, listfunc, response, needPkgs) - if err != nil { - return nil, err - } + if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs); err != nil { + return nil, err } // Check candidate packages for containFiles. 
if len(containFiles) > 0 { @@ -205,6 +187,9 @@ extractQueries: } func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error { + if len(pkgs) == 0 { + return nil + } dr, err := driver(cfg, pkgs...) if err != nil { return err @@ -212,10 +197,15 @@ func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDedu for _, pkg := range dr.Packages { response.addPackage(pkg) } + _, needPkgs, err := processGolistOverlay(cfg, response.dr) + if err != nil { + return err + } + addNeededOverlayPackages(cfg, driver, response, needPkgs) return nil } -func runContainsQueries(cfg *Config, driver driver, isFallback bool, response *responseDeduper, queries []string) error { +func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. fdir := filepath.Dir(query) @@ -225,11 +215,6 @@ func runContainsQueries(cfg *Config, driver driver, isFallback bool, response *r if err != nil { return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) } - if isFallback { - pattern = "." - cfg.Dir = fdir - } - dirResponse, err := driver(cfg, pattern) if err != nil { return err @@ -559,10 +544,10 @@ func otherFiles(p *jsonPackage) [][]string { return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} } -// golistDriverCurrent uses the "go list" command to expand the -// pattern words and return metadata for the specified packages. -// dir may be "" and env may be nil, as per os/exec.Command. -func golistDriverCurrent(cfg *Config, words ...string) (*driverResponse, error) { +// golistDriver uses the "go list" command to expand the pattern +// words and return metadata for the specified packages. dir may be +// "" and env may be nil, as per os/exec.Command. 
+func golistDriver(cfg *Config, words ...string) (*driverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -605,7 +590,7 @@ func golistDriverCurrent(cfg *Config, words ...string) (*driverResponse, error) if old, found := seen[p.ImportPath]; found { if !reflect.DeepEqual(p, old) { - return nil, fmt.Errorf("go list repeated package %v with different values", p.ImportPath) + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) } // skip the duplicate continue @@ -720,14 +705,16 @@ func absJoin(dir string, fileses ...[]string) (res []string) { } func golistargs(cfg *Config, words []string) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo fullargs := []string{ - "list", "-e", "-json", "-compiled", + "list", "-e", "-json", + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), fmt.Sprintf("-test=%t", cfg.Tests), fmt.Sprintf("-export=%t", usesExportData(cfg)), - fmt.Sprintf("-deps=%t", cfg.Mode >= LoadImports), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedDeps != 0), // go list doesn't let you pass -test and -find together, // probably because you'd just get the TestMain. - fmt.Sprintf("-find=%t", cfg.Mode < LoadImports && !cfg.Tests), + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), } fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") @@ -757,10 +744,14 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { } if err := cmd.Run(); err != nil { + // Check for 'go' executable not being found. 
+ if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + exitErr, ok := err.(*exec.ExitError) if !ok { // Catastrophic error: - // - executable not found // - context cancellation return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) } @@ -770,6 +761,22 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} } + // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show + // the error in the Err section of stdout in case -e option is provided. + // This fix is provided for backwards compatibility. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + // Export mode entails a build. // If that build fails, errors appear on stderr // (despite the -e flag) and the Export field is blank. diff --git a/vendor/golang.org/x/tools/go/packages/golist_fallback.go b/vendor/golang.org/x/tools/go/packages/golist_fallback.go deleted file mode 100644 index 141fa19ac19..00000000000 --- a/vendor/golang.org/x/tools/go/packages/golist_fallback.go +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packages - -import ( - "encoding/json" - "fmt" - "go/build" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "sort" - "strings" - - "golang.org/x/tools/go/internal/cgo" -) - -// TODO(matloob): Delete this file once Go 1.12 is released. - -// This file provides backwards compatibility support for -// loading for versions of Go earlier than 1.11. This support is meant to -// assist with migration to the Package API until there's -// widespread adoption of these newer Go versions. -// This support will be removed once Go 1.12 is released -// in Q1 2019. - -func golistDriverFallback(cfg *Config, words ...string) (*driverResponse, error) { - // Turn absolute paths into GOROOT and GOPATH-relative paths to provide to go list. - // This will have surprising behavior if GOROOT or GOPATH contain multiple packages with the same - // path and a user provides an absolute path to a directory that's shadowed by an earlier - // directory in GOROOT or GOPATH with the same package path. - words = cleanAbsPaths(cfg, words) - - original, deps, err := getDeps(cfg, words...) 
- if err != nil { - return nil, err - } - - var tmpdir string // used for generated cgo files - var needsTestVariant []struct { - pkg, xtestPkg *Package - } - - var response driverResponse - allPkgs := make(map[string]bool) - addPackage := func(p *jsonPackage, isRoot bool) { - id := p.ImportPath - - if allPkgs[id] { - return - } - allPkgs[id] = true - - pkgpath := id - - if pkgpath == "unsafe" { - p.GoFiles = nil // ignore fake unsafe.go file - } - - importMap := func(importlist []string) map[string]*Package { - importMap := make(map[string]*Package) - for _, id := range importlist { - - if id == "C" { - for _, path := range []string{"unsafe", "syscall", "runtime/cgo"} { - if pkgpath != path && importMap[path] == nil { - importMap[path] = &Package{ID: path} - } - } - continue - } - importMap[vendorlessPath(id)] = &Package{ID: id} - } - return importMap - } - compiledGoFiles := absJoin(p.Dir, p.GoFiles) - // Use a function to simplify control flow. It's just a bunch of gotos. - var cgoErrors []error - var outdir string - getOutdir := func() (string, error) { - if outdir != "" { - return outdir, nil - } - if tmpdir == "" { - if tmpdir, err = ioutil.TempDir("", "gopackages"); err != nil { - return "", err - } - } - outdir = filepath.Join(tmpdir, strings.Replace(p.ImportPath, "/", "_", -1)) - if err := os.MkdirAll(outdir, 0755); err != nil { - outdir = "" - return "", err - } - return outdir, nil - } - processCgo := func() bool { - // Suppress any cgo errors. Any relevant errors will show up in typechecking. - // TODO(matloob): Skip running cgo if Mode < LoadTypes. - outdir, err := getOutdir() - if err != nil { - cgoErrors = append(cgoErrors, err) - return false - } - files, _, err := runCgo(p.Dir, outdir, cfg.Env) - if err != nil { - cgoErrors = append(cgoErrors, err) - return false - } - compiledGoFiles = append(compiledGoFiles, files...) 
- return true - } - if len(p.CgoFiles) == 0 || !processCgo() { - compiledGoFiles = append(compiledGoFiles, absJoin(p.Dir, p.CgoFiles)...) // Punt to typechecker. - } - if isRoot { - response.Roots = append(response.Roots, id) - } - pkg := &Package{ - ID: id, - Name: p.Name, - GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), - CompiledGoFiles: compiledGoFiles, - OtherFiles: absJoin(p.Dir, otherFiles(p)...), - PkgPath: pkgpath, - Imports: importMap(p.Imports), - // TODO(matloob): set errors on the Package to cgoErrors - } - if p.Error != nil { - pkg.Errors = append(pkg.Errors, Error{ - Pos: p.Error.Pos, - Msg: p.Error.Err, - }) - } - response.Packages = append(response.Packages, pkg) - if cfg.Tests && isRoot { - testID := fmt.Sprintf("%s [%s.test]", id, id) - if len(p.TestGoFiles) > 0 || len(p.XTestGoFiles) > 0 { - response.Roots = append(response.Roots, testID) - testPkg := &Package{ - ID: testID, - Name: p.Name, - GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles, p.TestGoFiles), - CompiledGoFiles: append(compiledGoFiles, absJoin(p.Dir, p.TestGoFiles)...), - OtherFiles: absJoin(p.Dir, otherFiles(p)...), - PkgPath: pkgpath, - Imports: importMap(append(p.Imports, p.TestImports...)), - // TODO(matloob): set errors on the Package to cgoErrors - } - response.Packages = append(response.Packages, testPkg) - var xtestPkg *Package - if len(p.XTestGoFiles) > 0 { - xtestID := fmt.Sprintf("%s_test [%s.test]", id, id) - response.Roots = append(response.Roots, xtestID) - // Generate test variants for all packages q where a path exists - // such that xtestPkg -> ... -> q -> ... -> p (where p is the package under test) - // and rewrite all import map entries of p to point to testPkg (the test variant of - // p), and of each q to point to the test variant of that q. 
- xtestPkg = &Package{ - ID: xtestID, - Name: p.Name + "_test", - GoFiles: absJoin(p.Dir, p.XTestGoFiles), - CompiledGoFiles: absJoin(p.Dir, p.XTestGoFiles), - PkgPath: pkgpath + "_test", - Imports: importMap(p.XTestImports), - } - // Add to list of packages we need to rewrite imports for to refer to test variants. - // We may need to create a test variant of a package that hasn't been loaded yet, so - // the test variants need to be created later. - needsTestVariant = append(needsTestVariant, struct{ pkg, xtestPkg *Package }{pkg, xtestPkg}) - response.Packages = append(response.Packages, xtestPkg) - } - // testmain package - testmainID := id + ".test" - response.Roots = append(response.Roots, testmainID) - imports := map[string]*Package{} - imports[testPkg.PkgPath] = &Package{ID: testPkg.ID} - if xtestPkg != nil { - imports[xtestPkg.PkgPath] = &Package{ID: xtestPkg.ID} - } - testmainPkg := &Package{ - ID: testmainID, - Name: "main", - PkgPath: testmainID, - Imports: imports, - } - response.Packages = append(response.Packages, testmainPkg) - outdir, err := getOutdir() - if err != nil { - testmainPkg.Errors = append(testmainPkg.Errors, Error{ - Pos: "-", - Msg: fmt.Sprintf("failed to generate testmain: %v", err), - Kind: ListError, - }) - return - } - // Don't use a .go extension on the file, so that the tests think the file is inside GOCACHE. - // This allows the same test to test the pre- and post-Go 1.11 go list logic because the Go 1.11 - // go list generates test mains in the cache, and the test code knows not to rely on paths in the - // cache to stay stable. - testmain := filepath.Join(outdir, "testmain-go") - extraimports, extradeps, err := generateTestmain(testmain, testPkg, xtestPkg) - if err != nil { - testmainPkg.Errors = append(testmainPkg.Errors, Error{ - Pos: "-", - Msg: fmt.Sprintf("failed to generate testmain: %v", err), - Kind: ListError, - }) - } - deps = append(deps, extradeps...) 
- for _, imp := range extraimports { // testing, testing/internal/testdeps, and maybe os - imports[imp] = &Package{ID: imp} - } - testmainPkg.GoFiles = []string{testmain} - testmainPkg.CompiledGoFiles = []string{testmain} - } - } - } - - for _, pkg := range original { - addPackage(pkg, true) - } - if cfg.Mode < LoadImports || len(deps) == 0 { - return &response, nil - } - - buf, err := invokeGo(cfg, golistArgsFallback(cfg, deps)...) - if err != nil { - return nil, err - } - - // Decode the JSON and convert it to Package form. - for dec := json.NewDecoder(buf); dec.More(); { - p := new(jsonPackage) - if err := dec.Decode(p); err != nil { - return nil, fmt.Errorf("JSON decoding failed: %v", err) - } - - addPackage(p, false) - } - - for _, v := range needsTestVariant { - createTestVariants(&response, v.pkg, v.xtestPkg) - } - - return &response, nil -} - -func createTestVariants(response *driverResponse, pkgUnderTest, xtestPkg *Package) { - allPkgs := make(map[string]*Package) - for _, pkg := range response.Packages { - allPkgs[pkg.ID] = pkg - } - needsTestVariant := make(map[string]bool) - needsTestVariant[pkgUnderTest.ID] = true - var needsVariantRec func(p *Package) bool - needsVariantRec = func(p *Package) bool { - if needsTestVariant[p.ID] { - return true - } - for _, imp := range p.Imports { - if needsVariantRec(allPkgs[imp.ID]) { - // Don't break because we want to make sure all dependencies - // have been processed, and all required test variants of our dependencies - // exist. - needsTestVariant[p.ID] = true - } - } - if !needsTestVariant[p.ID] { - return false - } - // Create a clone of the package. It will share the same strings and lists of source files, - // but that's okay. It's only necessary for the Imports map to have a separate identity. 
- testVariant := *p - testVariant.ID = fmt.Sprintf("%s [%s.test]", p.ID, pkgUnderTest.ID) - testVariant.Imports = make(map[string]*Package) - for imp, pkg := range p.Imports { - testVariant.Imports[imp] = pkg - if needsTestVariant[pkg.ID] { - testVariant.Imports[imp] = &Package{ID: fmt.Sprintf("%s [%s.test]", pkg.ID, pkgUnderTest.ID)} - } - } - response.Packages = append(response.Packages, &testVariant) - return needsTestVariant[p.ID] - } - // finally, update the xtest package's imports - for imp, pkg := range xtestPkg.Imports { - if allPkgs[pkg.ID] == nil { - fmt.Printf("for %s: package %s doesn't exist\n", xtestPkg.ID, pkg.ID) - } - if needsVariantRec(allPkgs[pkg.ID]) { - xtestPkg.Imports[imp] = &Package{ID: fmt.Sprintf("%s [%s.test]", pkg.ID, pkgUnderTest.ID)} - } - } -} - -// cleanAbsPaths replaces all absolute paths with GOPATH- and GOROOT-relative -// paths. If an absolute path is not GOPATH- or GOROOT- relative, it is left as an -// absolute path so an error can be returned later. -func cleanAbsPaths(cfg *Config, words []string) []string { - var searchpaths []string - var cleaned = make([]string, len(words)) - for i := range cleaned { - cleaned[i] = words[i] - // Ignore relative directory paths (they must already be goroot-relative) and Go source files - // (absolute source files are already allowed for ad-hoc packages). - // TODO(matloob): Can there be non-.go files in ad-hoc packages. - if !filepath.IsAbs(cleaned[i]) || strings.HasSuffix(cleaned[i], ".go") { - continue - } - // otherwise, it's an absolute path. Search GOPATH and GOROOT to find it. 
- if searchpaths == nil { - cmd := exec.Command("go", "env", "GOPATH", "GOROOT") - cmd.Env = cfg.Env - out, err := cmd.Output() - if err != nil { - searchpaths = []string{} - continue // suppress the error, it will show up again when running go list - } - lines := strings.Split(string(out), "\n") - if len(lines) != 3 || lines[0] == "" || lines[1] == "" || lines[2] != "" { - continue // suppress error - } - // first line is GOPATH - for _, path := range filepath.SplitList(lines[0]) { - searchpaths = append(searchpaths, filepath.Join(path, "src")) - } - // second line is GOROOT - searchpaths = append(searchpaths, filepath.Join(lines[1], "src")) - } - for _, sp := range searchpaths { - if strings.HasPrefix(cleaned[i], sp) { - cleaned[i] = strings.TrimPrefix(cleaned[i], sp) - cleaned[i] = strings.TrimLeft(cleaned[i], string(filepath.Separator)) - } - } - } - return cleaned -} - -// vendorlessPath returns the devendorized version of the import path ipath. -// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". -// Copied from golang.org/x/tools/imports/fix.go. -func vendorlessPath(ipath string) string { - // Devendorize for use in import statement. - if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { - return ipath[i+len("/vendor/"):] - } - if strings.HasPrefix(ipath, "vendor/") { - return ipath[len("vendor/"):] - } - return ipath -} - -// getDeps runs an initial go list to determine all the dependency packages. -func getDeps(cfg *Config, words ...string) (initial []*jsonPackage, deps []string, err error) { - buf, err := invokeGo(cfg, golistArgsFallback(cfg, words)...) - if err != nil { - return nil, nil, err - } - - depsSet := make(map[string]bool) - var testImports []string - - // Extract deps from the JSON. 
- for dec := json.NewDecoder(buf); dec.More(); { - p := new(jsonPackage) - if err := dec.Decode(p); err != nil { - return nil, nil, fmt.Errorf("JSON decoding failed: %v", err) - } - - initial = append(initial, p) - for _, dep := range p.Deps { - depsSet[dep] = true - } - if cfg.Tests { - // collect the additional imports of the test packages. - pkgTestImports := append(p.TestImports, p.XTestImports...) - for _, imp := range pkgTestImports { - if depsSet[imp] { - continue - } - depsSet[imp] = true - testImports = append(testImports, imp) - } - } - } - // Get the deps of the packages imported by tests. - if len(testImports) > 0 { - buf, err = invokeGo(cfg, golistArgsFallback(cfg, testImports)...) - if err != nil { - return nil, nil, err - } - // Extract deps from the JSON. - for dec := json.NewDecoder(buf); dec.More(); { - p := new(jsonPackage) - if err := dec.Decode(p); err != nil { - return nil, nil, fmt.Errorf("JSON decoding failed: %v", err) - } - for _, dep := range p.Deps { - depsSet[dep] = true - } - } - } - - for _, orig := range initial { - delete(depsSet, orig.ImportPath) - } - - deps = make([]string, 0, len(depsSet)) - for dep := range depsSet { - deps = append(deps, dep) - } - sort.Strings(deps) // ensure output is deterministic - return initial, deps, nil -} - -func golistArgsFallback(cfg *Config, words []string) []string { - fullargs := []string{"list", "-e", "-json"} - fullargs = append(fullargs, cfg.BuildFlags...) - fullargs = append(fullargs, "--") - fullargs = append(fullargs, words...) - return fullargs -} - -func runCgo(pkgdir, tmpdir string, env []string) (files, displayfiles []string, err error) { - // Use go/build to open cgo files and determine the cgo flags, etc, from them. - // This is tricky so it's best to avoid reimplementing as much as we can, and - // we plan to delete this support once Go 1.12 is released anyways. - // TODO(matloob): This isn't completely correct because we're using the Default - // context. 
Perhaps we should more accurately fill in the context. - bp, err := build.ImportDir(pkgdir, build.ImportMode(0)) - if err != nil { - return nil, nil, err - } - for _, ev := range env { - if v := strings.TrimPrefix(ev, "CGO_CPPFLAGS"); v != ev { - bp.CgoCPPFLAGS = append(bp.CgoCPPFLAGS, strings.Fields(v)...) - } else if v := strings.TrimPrefix(ev, "CGO_CFLAGS"); v != ev { - bp.CgoCFLAGS = append(bp.CgoCFLAGS, strings.Fields(v)...) - } else if v := strings.TrimPrefix(ev, "CGO_CXXFLAGS"); v != ev { - bp.CgoCXXFLAGS = append(bp.CgoCXXFLAGS, strings.Fields(v)...) - } else if v := strings.TrimPrefix(ev, "CGO_LDFLAGS"); v != ev { - bp.CgoLDFLAGS = append(bp.CgoLDFLAGS, strings.Fields(v)...) - } - } - return cgo.Run(bp, pkgdir, tmpdir, true) -} diff --git a/vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go b/vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go deleted file mode 100644 index 128e00e25aa..00000000000 --- a/vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is largely based on the Go 1.10-era cmd/go/internal/test/test.go -// testmain generation code. - -package packages - -import ( - "errors" - "fmt" - "go/ast" - "go/doc" - "go/parser" - "go/token" - "os" - "sort" - "strings" - "text/template" - "unicode" - "unicode/utf8" -) - -// TODO(matloob): Delete this file once Go 1.12 is released. - -// This file complements golist_fallback.go by providing -// support for generating testmains. 
- -func generateTestmain(out string, testPkg, xtestPkg *Package) (extraimports, extradeps []string, err error) { - testFuncs, err := loadTestFuncs(testPkg, xtestPkg) - if err != nil { - return nil, nil, err - } - extraimports = []string{"testing", "testing/internal/testdeps"} - if testFuncs.TestMain == nil { - extraimports = append(extraimports, "os") - } - // Transitive dependencies of ("testing", "testing/internal/testdeps"). - // os is part of the transitive closure so it and its transitive dependencies are - // included regardless of whether it's imported in the template below. - extradeps = []string{ - "errors", - "internal/cpu", - "unsafe", - "internal/bytealg", - "internal/race", - "runtime/internal/atomic", - "runtime/internal/sys", - "runtime", - "sync/atomic", - "sync", - "io", - "unicode", - "unicode/utf8", - "bytes", - "math", - "syscall", - "time", - "internal/poll", - "internal/syscall/unix", - "internal/testlog", - "os", - "math/bits", - "strconv", - "reflect", - "fmt", - "sort", - "strings", - "flag", - "runtime/debug", - "context", - "runtime/trace", - "testing", - "bufio", - "regexp/syntax", - "regexp", - "compress/flate", - "encoding/binary", - "hash", - "hash/crc32", - "compress/gzip", - "path/filepath", - "io/ioutil", - "text/tabwriter", - "runtime/pprof", - "testing/internal/testdeps", - } - return extraimports, extradeps, writeTestmain(out, testFuncs) -} - -// The following is adapted from the cmd/go testmain generation code. - -// isTestFunc tells whether fn has the type of a testing function. arg -// specifies the parameter type we look for: B, M or T. 
-func isTestFunc(fn *ast.FuncDecl, arg string) bool { - if fn.Type.Results != nil && len(fn.Type.Results.List) > 0 || - fn.Type.Params.List == nil || - len(fn.Type.Params.List) != 1 || - len(fn.Type.Params.List[0].Names) > 1 { - return false - } - ptr, ok := fn.Type.Params.List[0].Type.(*ast.StarExpr) - if !ok { - return false - } - // We can't easily check that the type is *testing.M - // because we don't know how testing has been imported, - // but at least check that it's *M or *something.M. - // Same applies for B and T. - if name, ok := ptr.X.(*ast.Ident); ok && name.Name == arg { - return true - } - if sel, ok := ptr.X.(*ast.SelectorExpr); ok && sel.Sel.Name == arg { - return true - } - return false -} - -// isTest tells whether name looks like a test (or benchmark, according to prefix). -// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. -func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) -} - -// loadTestFuncs returns the testFuncs describing the tests that will be run. -func loadTestFuncs(ptest, pxtest *Package) (*testFuncs, error) { - t := &testFuncs{ - TestPackage: ptest, - XTestPackage: pxtest, - } - for _, file := range ptest.GoFiles { - if !strings.HasSuffix(file, "_test.go") { - continue - } - if err := t.load(file, "_test", &t.ImportTest, &t.NeedTest); err != nil { - return nil, err - } - } - if pxtest != nil { - for _, file := range pxtest.GoFiles { - if err := t.load(file, "_xtest", &t.ImportXtest, &t.NeedXtest); err != nil { - return nil, err - } - } - } - return t, nil -} - -// writeTestmain writes the _testmain.go file for t to the file named out. 
-func writeTestmain(out string, t *testFuncs) error { - f, err := os.Create(out) - if err != nil { - return err - } - defer f.Close() - - if err := testmainTmpl.Execute(f, t); err != nil { - return err - } - - return nil -} - -type testFuncs struct { - Tests []testFunc - Benchmarks []testFunc - Examples []testFunc - TestMain *testFunc - TestPackage *Package - XTestPackage *Package - ImportTest bool - NeedTest bool - ImportXtest bool - NeedXtest bool -} - -// Tested returns the name of the package being tested. -func (t *testFuncs) Tested() string { - return t.TestPackage.Name -} - -type testFunc struct { - Package string // imported package name (_test or _xtest) - Name string // function name - Output string // output, for examples - Unordered bool // output is allowed to be unordered. -} - -func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error { - var fset = token.NewFileSet() - - f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments) - if err != nil { - return errors.New("failed to parse test file " + filename) - } - for _, d := range f.Decls { - n, ok := d.(*ast.FuncDecl) - if !ok { - continue - } - if n.Recv != nil { - continue - } - name := n.Name.String() - switch { - case name == "TestMain": - if isTestFunc(n, "T") { - t.Tests = append(t.Tests, testFunc{pkg, name, "", false}) - *doImport, *seen = true, true - continue - } - err := checkTestFunc(fset, n, "M") - if err != nil { - return err - } - if t.TestMain != nil { - return errors.New("multiple definitions of TestMain") - } - t.TestMain = &testFunc{pkg, name, "", false} - *doImport, *seen = true, true - case isTest(name, "Test"): - err := checkTestFunc(fset, n, "T") - if err != nil { - return err - } - t.Tests = append(t.Tests, testFunc{pkg, name, "", false}) - *doImport, *seen = true, true - case isTest(name, "Benchmark"): - err := checkTestFunc(fset, n, "B") - if err != nil { - return err - } - t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, "", false}) - 
*doImport, *seen = true, true - } - } - ex := doc.Examples(f) - sort.Slice(ex, func(i, j int) bool { return ex[i].Order < ex[j].Order }) - for _, e := range ex { - *doImport = true // import test file whether executed or not - if e.Output == "" && !e.EmptyOutput { - // Don't run examples with no output. - continue - } - t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output, e.Unordered}) - *seen = true - } - return nil -} - -func checkTestFunc(fset *token.FileSet, fn *ast.FuncDecl, arg string) error { - if !isTestFunc(fn, arg) { - name := fn.Name.String() - pos := fset.Position(fn.Pos()) - return fmt.Errorf("%s: wrong signature for %s, must be: func %s(%s *testing.%s)", pos, name, name, strings.ToLower(arg), arg) - } - return nil -} - -var testmainTmpl = template.Must(template.New("main").Parse(` -package main - -import ( -{{if not .TestMain}} - "os" -{{end}} - "testing" - "testing/internal/testdeps" - -{{if .ImportTest}} - {{if .NeedTest}}_test{{else}}_{{end}} {{.TestPackage.PkgPath | printf "%q"}} -{{end}} -{{if .ImportXtest}} - {{if .NeedXtest}}_xtest{{else}}_{{end}} {{.XTestPackage.PkgPath | printf "%q"}} -{{end}} -) - -var tests = []testing.InternalTest{ -{{range .Tests}} - {"{{.Name}}", {{.Package}}.{{.Name}}}, -{{end}} -} - -var benchmarks = []testing.InternalBenchmark{ -{{range .Benchmarks}} - {"{{.Name}}", {{.Package}}.{{.Name}}}, -{{end}} -} - -var examples = []testing.InternalExample{ -{{range .Examples}} - {"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}}, -{{end}} -} - -func init() { - testdeps.ImportPath = {{.TestPackage.PkgPath | printf "%q"}} -} - -func main() { - m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples) -{{with .TestMain}} - {{.Package}}.{{.Name}}(m) -{{else}} - os.Exit(m.Run()) -{{end}} -} - -`)) diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index 71ffcd9d55b..33a0a28f2c4 100644 --- 
a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -46,7 +46,9 @@ outer: fileExists = true } } - if dirContains { + // The overlay could have included an entirely new package. + isNewPackage := extractPackage(pkg, path, contents) + if dirContains || isNewPackage { if !fileExists { pkg.GoFiles = append(pkg.GoFiles, path) // TODO(matloob): should the file just be added to GoFiles? pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, path) @@ -102,3 +104,35 @@ func extractImports(filename string, contents []byte) ([]string, error) { } return res, nil } + +// extractPackage attempts to extract a package defined in an overlay. +// +// If the package has errors and has no Name, GoFiles, or Imports, +// then it's possible that it doesn't yet exist on disk. +func extractPackage(pkg *Package, filename string, contents []byte) bool { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + if len(pkg.Errors) != 1 { + return false + } + if pkg.Name != "" || pkg.ExportFile != "" { + return false + } + if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { + return false + } + if len(pkg.Imports) > 0 { + return false + } + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? + if err != nil { + return false + } + // TODO(rstambler): This doesn't work for main packages. + if filepath.Base(pkg.PkgPath) != f.Name.Name { + return false + } + pkg.Name = f.Name.Name + pkg.Errors = nil + return true +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 1e5836c9e9e..eedd43bb6b2 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -30,32 +30,78 @@ import ( // but may be slower. Load may return more information than requested. 
type LoadMode int +const ( + // The following constants are used to specify which fields of the Package + // should be filled when loading is done. As a special case to provide + // backwards compatibility, a LoadMode of 0 is equivalent to LoadFiles. + // For all other LoadModes, the bits below specify which fields will be filled + // in the result packages. + // WARNING: This part of the go/packages API is EXPERIMENTAL. It might + // be changed or removed up until April 15 2019. After that date it will + // be frozen. + // TODO(matloob): Remove this comment on April 15. + + // ID and Errors (if present) will always be filled. + + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. If NeedImports + // is not set NeedDeps has no effect. + NeedDeps + + // NeedExportsFile adds ExportsFile. + NeedExportsFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes +) + const ( // LoadFiles finds the packages and computes their source file lists. - // Package fields: ID, Name, Errors, GoFiles, and OtherFiles. - LoadFiles LoadMode = iota + // Package fields: ID, Name, Errors, GoFiles, CompiledGoFiles, and OtherFiles. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles // LoadImports adds import information for each package // and its dependencies. // Package fields added: Imports. 
- LoadImports + LoadImports = LoadFiles | NeedImports | NeedDeps // LoadTypes adds type information for package-level // declarations in the packages matching the patterns. - // Package fields added: Types, Fset, and IllTyped. + // Package fields added: Types, TypesSizes, Fset, and IllTyped. // This mode uses type information provided by the build system when // possible, and may fill in the ExportFile field. - LoadTypes + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes // LoadSyntax adds typed syntax trees for the packages matching the patterns. // Package fields added: Syntax, and TypesInfo, for direct pattern matches only. - LoadSyntax + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo // LoadAllSyntax adds typed syntax trees for the packages matching the patterns // and all dependencies. // Package fields added: Types, Fset, IllTyped, Syntax, and TypesInfo, // for all packages in the import graph. - LoadAllSyntax + LoadAllSyntax = LoadSyntax ) // A Config specifies details about how packages should be loaded. @@ -91,7 +137,7 @@ type Config struct { BuildFlags []string // Fset provides source position information for syntax trees and types. - // If Fset is nil, the loader will create a new FileSet. + // If Fset is nil, Load will use a new fileset, but preserve Fset's value. Fset *token.FileSet // ParseFile is called to read and parse each file @@ -372,15 +418,34 @@ type loaderPackage struct { type loader struct { pkgs map[string]*loaderPackage Config - sizes types.Sizes - exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + sizes types.Sizes + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // TODO(matloob): Add an implied mode here and use that instead of mode. + // Implied mode would contain all the fields we need the data for so we can + // get the actually requested fields. We'll zero them out before returning + // packages to the user. 
This will make it easier for us to get the conditions + // where we need certain modes right. +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} } func newLoader(cfg *Config) *loader { - ld := &loader{} + ld := &loader{ + parseCache: map[string]*parseValue{}, + } if cfg != nil { ld.Config = *cfg } + if ld.Config.Mode == 0 { + ld.Config.Mode = LoadFiles // Preserve zero behavior of Mode for backwards compatibility. + } if ld.Config.Env == nil { ld.Config.Env = os.Environ() } @@ -393,7 +458,7 @@ func newLoader(cfg *Config) *loader { } } - if ld.Mode >= LoadTypes { + if ld.Mode&NeedTypes != 0 { if ld.Fset == nil { ld.Fset = token.NewFileSet() } @@ -402,12 +467,8 @@ func newLoader(cfg *Config) *loader { // because we load source if export data is missing. if ld.ParseFile == nil { ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { - var isrc interface{} - if src != nil { - isrc = src - } const mode = parser.AllErrors | parser.ParseComments - return parser.ParseFile(fset, filename, isrc, mode) + return parser.ParseFile(fset, filename, src, mode) } } } @@ -430,11 +491,9 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { rootIndex = i } lpkg := &loaderPackage{ - Package: pkg, - needtypes: ld.Mode >= LoadAllSyntax || - ld.Mode >= LoadTypes && rootIndex >= 0, - needsrc: ld.Mode >= LoadAllSyntax || - ld.Mode >= LoadSyntax && rootIndex >= 0 || + Package: pkg, + needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0, + needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0 || len(ld.Overlay) > 0 || // Overlays can invalidate export data. 
TODO(matloob): make this check fine-grained based on dependencies on overlaid files pkg.ExportFile == "" && pkg.PkgPath != "unsafe", } @@ -507,14 +566,17 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { if lpkg.needsrc { srcPkgs = append(srcPkgs, lpkg) } + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } stack = stack[:len(stack)-1] // pop lpkg.color = black return lpkg.needsrc } - if ld.Mode < LoadImports { - //we do this to drop the stub import packages that we are not even going to try to resolve + if ld.Mode&(NeedImports|NeedDeps) == 0 { + // We do this to drop the stub import packages that we are not even going to try to resolve. for _, lpkg := range initial { lpkg.Imports = nil } @@ -524,17 +586,19 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { visit(lpkg) } } - for _, lpkg := range srcPkgs { - // Complete type information is required for the - // immediate dependencies of each source package. - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - imp.needtypes = true + if ld.Mode&NeedDeps != 0 { // TODO(matloob): This is only the case if NeedTypes is also set, right? + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true + } } } // Load type data if needed, starting at // the initial packages (roots of the import DAG). - if ld.Mode >= LoadTypes { + if ld.Mode&NeedTypes != 0 { var wg sync.WaitGroup for _, lpkg := range initial { wg.Add(1) @@ -547,16 +611,61 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { } result := make([]*Package, len(initial)) + importPlaceholders := make(map[string]*Package) for i, lpkg := range initial { result[i] = lpkg.Package } + for i := range ld.pkgs { + // Clear all unrequested fields, for extra de-Hyrum-ization. 
+ if ld.Mode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.Mode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + } + if ld.Mode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.Mode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.Mode&NeedExportsFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.Mode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.Mode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.Mode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.Mode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + if ld.Mode&NeedDeps == 0 { + for j, pkg := range ld.pkgs[i].Imports { + ph, ok := importPlaceholders[pkg.ID] + if !ok { + ph = &Package{ID: pkg.ID} + importPlaceholders[pkg.ID] = ph + } + ld.pkgs[i].Imports[j] = ph + } + } + } return result, nil } // loadRecursive loads the specified package and its dependencies, // recursively, in parallel, in topological order. // It is atomic and idempotent. -// Precondition: ld.Mode >= LoadTypes. +// Precondition: ld.Mode&NeedTypes. func (ld *loader) loadRecursive(lpkg *loaderPackage) { lpkg.loadOnce.Do(func() { // Load the direct dependencies, in parallel. @@ -708,7 +817,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { // Type-check bodies of functions only in non-initial packages. // Example: for import graph A->B->C and initial packages {A,C}, // we can ignore function bodies in B. - IgnoreFuncBodies: ld.Mode < LoadAllSyntax && !lpkg.initial, + IgnoreFuncBodies: (ld.Mode&(NeedDeps|NeedTypesInfo) == 0) && !lpkg.initial, Error: appendError, Sizes: ld.sizes, @@ -761,6 +870,42 @@ func (f importerFunc) Import(path string) (*types.Package, error) { return f(pat // the number of parallel I/O calls per process. 
var ioLimit = make(chan bool, 20) +func (ld *loader) parseFile(filename string) (*ast.File, error) { + ld.parseCacheMu.Lock() + v, ok := ld.parseCache[filename] + if ok { + // cache hit + ld.parseCacheMu.Unlock() + <-v.ready + } else { + // cache miss + v = &parseValue{ready: make(chan struct{})} + ld.parseCache[filename] = v + ld.parseCacheMu.Unlock() + + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + ioLimit <- true // wait + src, err = ioutil.ReadFile(filename) + <-ioLimit // signal + } + if err != nil { + v.err = err + } else { + v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + } + + close(v.ready) + } + return v.f, v.err +} + // parseFiles reads and parses the Go source files and returns the ASTs // of the ones that could be at least partially parsed, along with a // list of I/O and parse errors encountered. @@ -781,24 +926,7 @@ func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { } wg.Add(1) go func(i int, filename string) { - ioLimit <- true // wait - // ParseFile may return both an AST and an error. 
- var src []byte - for f, contents := range ld.Config.Overlay { - if sameFile(f, filename) { - src = contents - } - } - var err error - if src == nil { - src, err = ioutil.ReadFile(filename) - } - if err != nil { - parsed[i], errors[i] = nil, err - } else { - parsed[i], errors[i] = ld.ParseFile(ld.Fset, filename, src) - } - <-ioLimit // signal + parsed[i], errors[i] = ld.parseFile(filename) wg.Done() }(i, file) } @@ -952,5 +1080,5 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error } func usesExportData(cfg *Config) bool { - return LoadTypes <= cfg.Mode && cfg.Mode < LoadAllSyntax + return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedTypesInfo == 0 } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 00000000000..0d85488efb6 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,523 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. +// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. In this example, +// type A struct{ X int } +// type B A +// the field X has two paths due to its membership of both A and B. +// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. 
+package objectpath + +import ( + "fmt" + "strconv" + "strings" + + "go/types" +) + +// A Path is an opaque name that identifies a types.Object +// relative to its package. Conceptually, the name consists of a +// sequence of destructuring operations applied to the package scope +// to obtain the original object. +// The name does not include the package itself. +type Path string + +// Encoding +// +// An object path is a textual and (with training) human-readable encoding +// of a sequence of destructuring operators, starting from a types.Package. +// The sequences represent a path through the package/object/type graph. +// We classify these operators by their type: +// +// PO package->object Package.Scope.Lookup +// OT object->type Object.Type +// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TO type->object Type.{At,Field,Method,Obj} [AFMO] +// +// All valid paths start with a package and end at an object +// and thus may be defined by the regular language: +// +// objectpath = PO (OT TT* TO)* +// +// The concrete encoding follows directly: +// - The only PO operator is Package.Scope.Lookup, which requires an identifier. +// - The only OT operator is Object.Type, +// which we encode as '.' because dot cannot appear in an identifier. +// - The TT operators are encoded as [EKPRU]. +// - The OT operators are encoded as [AFMO]; +// three of these (At,Field,Method) require an integer operand, +// which is encoded as a string of decimal digits. +// These indices are stable across different representations +// of the same package, even source and export data. +// +// In the example below, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// field X has the path "T.UM0.RA1.F0", +// representing the following sequence of operations: +// +// p.Lookup("T") T +// .Type().Underlying().Method(0). 
f +// .Type().Results().At(1) b +// .Type().Field(0) X +// +// The encoding is not maximally compact---every R or P is +// followed by an A, for example---but this simplifies the +// encoder and decoder. +// +const ( + // object->type operators + opType = '.' // .Type() (Object) + + // type->type operators + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + + // type->object operators + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named) +) + +// The For function returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. +// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. +// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// For(X) would return a path that denotes the following sequence of operations: +// +// p.Scope().Lookup("T") (TypeName T) +// .Type().Underlying().Method(0). (method Func f) +// .Type().Results().At(1) (field Var b) +// .Type().Field(0) (field Var X) +// +// where p is the package (*types.Package) to which X belongs. +func For(obj types.Object) (Path, error) { + pkg := obj.Pkg() + + // This table lists the cases of interest. 
+ // + // Object Action + // ------ ------ + // nil reject + // builtin reject + // pkgname reject + // label reject + // var + // package-level accept + // func param/result accept + // local reject + // struct field accept + // const + // package-level accept + // local reject + // func + // package-level accept + // init functions reject + // concrete method accept + // interface method accept + // type + // package-level accept + // local reject + // + // The only accessible package-level objects are members of pkg itself. + // + // The cases are handled in four steps: + // + // 1. reject nil and builtin + // 2. accept package-level objects + // 3. reject obviously invalid objects + // 4. search the API for the path to the param/result/field/method. + + // 1. reference to nil or builtin? + if pkg == nil { + return "", fmt.Errorf("predeclared %s has no path", obj) + } + scope := pkg.Scope() + + // 2. package-level object? + if scope.Lookup(obj.Name()) == obj { + // Only exported objects (and non-exported types) have a path. + // Non-exported types may be referenced by other objects. + if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { + return "", fmt.Errorf("no path for non-exported %v", obj) + } + return Path(obj.Name()), nil + } + + // 3. Not a package-level object. + // Reject obviously non-viable cases. + switch obj := obj.(type) { + case *types.Const, // Only package-level constants have a path. + *types.TypeName, // Only package-level types have a path. + *types.Label, // Labels are function-local. + *types.PkgName: // PkgNames are file-local. + return "", fmt.Errorf("no path for %v", obj) + + case *types.Var: + // Could be: + // - a field (obj.IsField()) + // - a func parameter or result + // - a local var. + // Sadly there is no way to distinguish + // a param/result from a local + // so we must proceed to the find. + + case *types.Func: + // A func, if not package-level, must be a method. 
+ if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + return "", fmt.Errorf("func is not a method: %v", obj) + } + // TODO(adonovan): opt: if the method is concrete, + // do a specialized version of the rest of this function so + // that it's O(1) not O(|scope|). Basically 'find' is needed + // only for struct fields and interface methods. + + default: + panic(obj) + } + + // 4. Search the API for the path to the var (field/param/result) or method. + + // First inspect package-level named types. + // In the presence of path aliases, these give + // the best paths because non-types may + // refer to types, but not the reverse. + empty := make([]byte, 0, 48) // initial space + for _, name := range scope.Names() { + o := scope.Lookup(name) + tname, ok := o.(*types.TypeName) + if !ok { + continue // handle non-types in second pass + } + + path := append(empty, name...) + path = append(path, opType) + + T := o.Type() + + if tname.IsAlias() { + // type alias + if r := find(obj, T, path); r != nil { + return Path(r), nil + } + } else { + // defined (named) type + if r := find(obj, T.Underlying(), append(path, opUnderlying)); r != nil { + return Path(r), nil + } + } + } + + // Then inspect everything else: + // non-types, and declared methods of defined types. + for _, name := range scope.Names() { + o := scope.Lookup(name) + path := append(empty, name...) + if _, ok := o.(*types.TypeName); !ok { + if o.Exported() { + // exported non-type (const, var, func) + if r := find(obj, o.Type(), append(path, opType)); r != nil { + return Path(r), nil + } + } + continue + } + + // Inspect declared methods of defined types. 
+ if T, ok := o.Type().(*types.Named); ok { + path = append(path, opType) + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType)); r != nil { + return Path(r), nil + } + } + } + } + + return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { + path = append(path, op) + path = strconv.AppendInt(path, int64(arg), 10) + return path +} + +// find finds obj within type T, returning the path to it, or nil if not found. +func find(obj types.Object, T types.Type, path []byte) []byte { + switch T := T.(type) { + case *types.Basic, *types.Named: + // Named types belonging to pkg were handled already, + // so T must belong to another package. No path. + return nil + case *types.Pointer: + return find(obj, T.Elem(), append(path, opElem)) + case *types.Slice: + return find(obj, T.Elem(), append(path, opElem)) + case *types.Array: + return find(obj, T.Elem(), append(path, opElem)) + case *types.Chan: + return find(obj, T.Elem(), append(path, opElem)) + case *types.Map: + if r := find(obj, T.Key(), append(path, opKey)); r != nil { + return r + } + return find(obj, T.Elem(), append(path, opElem)) + case *types.Signature: + if r := find(obj, T.Params(), append(path, opParams)); r != nil { + return r + } + return find(obj, T.Results(), append(path, opResults)) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + f := T.Field(i) + path2 := appendOpArg(path, opField, i) + if f == obj { + return path2 // found field var + } + if r := find(obj, f.Type(), append(path2, opType)); r != nil { + return r + } + } + return nil + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + v := T.At(i) + path2 := appendOpArg(path, opAt, i) + if v == obj { + return path2 // found param/result var + } + if r := find(obj, v.Type(), append(path2, 
opType)); r != nil { + return r + } + } + return nil + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return path2 // found interface method + } + if r := find(obj, m.Type(), append(path2, opType)); r != nil { + return r + } + } + return nil + } + panic(T) +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { + if p == "" { + return nil, fmt.Errorf("empty path") + } + + pathstr := string(p) + var pkgobj, suffix string + if dot := strings.IndexByte(pathstr, opType); dot < 0 { + pkgobj = pathstr + } else { + pkgobj = pathstr[:dot] + suffix = pathstr[dot:] // suffix starts with "." + } + + obj := pkg.Scope().Lookup(pkgobj) + if obj == nil { + return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) + } + + // abtraction of *types.{Pointer,Slice,Array,Chan,Map} + type hasElem interface { + Elem() types.Type + } + // abstraction of *types.{Interface,Named} + type hasMethods interface { + Method(int) *types.Func + NumMethods() int + } + + // The loop state is the pair (t, obj), + // exactly one of which is non-nil, initially obj. + // All suffixes start with '.' (the only object->type operation), + // followed by optional type->type operations, + // then a type->object operation. + // The cycle then repeats. + var t types.Type + for suffix != "" { + code := suffix[0] + suffix = suffix[1:] + + // Codes [AFM] have an integer operand. + var index int + switch code { + case opAt, opField, opMethod: + rest := strings.TrimLeft(suffix, "0123456789") + numerals := suffix[:len(suffix)-len(rest)] + suffix = rest + i, err := strconv.Atoi(numerals) + if err != nil { + return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) + } + index = int(i) + case opObj: + // no operand + default: + // The suffix must end with a type->object operation. 
+ if suffix == "" { + return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) + } + } + + if code == opType { + if t != nil { + return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) + } + t = obj.Type() + obj = nil + continue + } + + if t == nil { + return nil, fmt.Errorf("invalid path: code %q in object context", code) + } + + // Inv: t != nil, obj == nil + + switch code { + case opElem: + hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) + } + t = hasElem.Elem() + + case opKey: + mapType, ok := t.(*types.Map) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) + } + t = mapType.Key() + + case opParams: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Params() + + case opResults: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Results() + + case opUnderlying: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t) + } + t = named.Underlying() + + case opAt: + tuple, ok := t.(*types.Tuple) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %s, want tuple)", code, t, t) + } + if n := tuple.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + obj = tuple.At(index) + t = nil + + case opField: + structType, ok := t.(*types.Struct) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) + } + if n := structType.NumFields(); index >= n { + return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) + } + obj = structType.Field(index) + t = nil + + case opMethod: + hasMethods, ok := 
t.(hasMethods) // Interface or Named + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %s, want interface or named)", code, t, t) + } + if n := hasMethods.NumMethods(); index >= n { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n) + } + obj = hasMethods.Method(index) + t = nil + + case opObj: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t) + } + obj = named.Obj() + t = nil + + default: + return nil, fmt.Errorf("invalid path: unknown code %q", code) + } + } + + if obj.Pkg() != pkg { + return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) + } + + return obj, nil // success +} diff --git a/vendor/golang.org/x/tools/imports/fix.go b/vendor/golang.org/x/tools/imports/fix.go index 4c0339305db..777d28ccdba 100644 --- a/vendor/golang.org/x/tools/imports/fix.go +++ b/vendor/golang.org/x/tools/imports/fix.go @@ -1043,7 +1043,7 @@ func findImport(ctx context.Context, env *fixEnv, dirScan []*pkg, pkgName string // Find candidate packages, looking only at their directory names first. 
var candidates []pkgDistance for _, pkg := range dirScan { - if pkgIsCandidate(filename, pkgName, pkg) { + if pkg.dir != pkgDir && pkgIsCandidate(filename, pkgName, pkg) { candidates = append(candidates, pkgDistance{ pkg: pkg, distance: distance(pkgDir, pkg.dir), diff --git a/vendor/golang.org/x/tools/imports/mkstdlib.go b/vendor/golang.org/x/tools/imports/mkstdlib.go index 5059ad4d7d3..c8865e5559f 100644 --- a/vendor/golang.org/x/tools/imports/mkstdlib.go +++ b/vendor/golang.org/x/tools/imports/mkstdlib.go @@ -14,6 +14,7 @@ import ( "io/ioutil" "log" "os" + "os/exec" "path/filepath" "regexp" "runtime" @@ -59,6 +60,10 @@ func main() { mustOpen(api("go1.10.txt")), mustOpen(api("go1.11.txt")), mustOpen(api("go1.12.txt")), + + // The API of the syscall/js package needs to be computed explicitly, + // because it's not included in the GOROOT/api/go1.*.txt files at this time. + syscallJSAPI(), ) sc := bufio.NewScanner(f) @@ -110,3 +115,18 @@ func main() { log.Fatal(err) } } + +// syscallJSAPI returns the API of the syscall/js package. +// It's computed from the contents of $(go env GOROOT)/src/syscall/js. 
+func syscallJSAPI() io.Reader { + var exeSuffix string + if runtime.GOOS == "windows" { + exeSuffix = ".exe" + } + cmd := exec.Command("go"+exeSuffix, "run", "cmd/api", "-contexts", "js-wasm", "syscall/js") + out, err := cmd.Output() + if err != nil { + log.Fatalln(err) + } + return bytes.NewReader(out) +} diff --git a/vendor/golang.org/x/tools/imports/zstdlib.go b/vendor/golang.org/x/tools/imports/zstdlib.go index c18a0095b20..d81b8c5307f 100644 --- a/vendor/golang.org/x/tools/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/imports/zstdlib.go @@ -9783,6 +9783,29 @@ var stdlib = map[string]map[string]bool{ "XP1_UNI_RECV": true, "XP1_UNI_SEND": true, }, + "syscall/js": map[string]bool{ + "Error": true, + "Func": true, + "FuncOf": true, + "Global": true, + "Null": true, + "Type": true, + "TypeBoolean": true, + "TypeFunction": true, + "TypeNull": true, + "TypeNumber": true, + "TypeObject": true, + "TypeString": true, + "TypeSymbol": true, + "TypeUndefined": true, + "TypedArray": true, + "TypedArrayOf": true, + "Undefined": true, + "Value": true, + "ValueError": true, + "ValueOf": true, + "Wrapper": true, + }, "testing": map[string]bool{ "AllocsPerRun": true, "B": true, diff --git a/vendor/modules.txt b/vendor/modules.txt index 99a369b30bf..66fa92fbbb2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -21,7 +21,7 @@ github.com/apparentlymart/go-cidr/cidr github.com/apparentlymart/go-textseg/textseg # github.com/armon/go-radix v1.0.0 github.com/armon/go-radix -# github.com/aws/aws-sdk-go v1.19.39 +# github.com/aws/aws-sdk-go v1.19.42 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr @@ -180,6 +180,28 @@ github.com/aws/aws-sdk-go/private/protocol/query/queryutil github.com/aws/aws-sdk-go/internal/sdkuri # github.com/beevik/etree v1.1.0 github.com/beevik/etree +# github.com/bflad/tfproviderlint v0.2.0 
+github.com/bflad/tfproviderlint/cmd/tfproviderlint +github.com/bflad/tfproviderlint/passes/AT001 +github.com/bflad/tfproviderlint/passes/AT002 +github.com/bflad/tfproviderlint/passes/AT003 +github.com/bflad/tfproviderlint/passes/AT004 +github.com/bflad/tfproviderlint/passes/R001 +github.com/bflad/tfproviderlint/passes/R002 +github.com/bflad/tfproviderlint/passes/R003 +github.com/bflad/tfproviderlint/passes/R004 +github.com/bflad/tfproviderlint/passes/S001 +github.com/bflad/tfproviderlint/passes/S002 +github.com/bflad/tfproviderlint/passes/S003 +github.com/bflad/tfproviderlint/passes/S004 +github.com/bflad/tfproviderlint/passes/S005 +github.com/bflad/tfproviderlint/passes/S006 +github.com/bflad/tfproviderlint/passes/acctestcase +github.com/bflad/tfproviderlint/passes/commentignore +github.com/bflad/tfproviderlint/passes/acctestfunc +github.com/bflad/tfproviderlint/passes/resourcedataset +github.com/bflad/tfproviderlint/passes/schemaresource +github.com/bflad/tfproviderlint/passes/schemaschema # github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d github.com/bgentry/go-netrc/netrc # github.com/bgentry/speakeasy v0.1.0 @@ -231,7 +253,7 @@ github.com/gobwas/glob/util/strings # github.com/gogo/protobuf v1.2.0 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang/mock v1.2.0 +# github.com/golang/mock v1.3.1 github.com/golang/mock/gomock # github.com/golang/protobuf v1.3.0 github.com/golang/protobuf/proto @@ -285,7 +307,7 @@ github.com/golangci/gocyclo/pkg/gocyclo # github.com/golangci/gofmt v0.0.0-20181105071733-0b8337e80d98 github.com/golangci/gofmt/gofmt github.com/golangci/gofmt/goimports -# github.com/golangci/golangci-lint v1.16.0 +# github.com/golangci/golangci-lint v1.16.1-0.20190402065613-de1d1ad903cd 
github.com/golangci/golangci-lint/cmd/golangci-lint github.com/golangci/golangci-lint/pkg/commands github.com/golangci/golangci-lint/pkg/config @@ -324,9 +346,10 @@ github.com/golangci/prealloc github.com/golangci/revgrep # github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 github.com/golangci/unconvert -# github.com/google/go-cmp v0.2.0 +# github.com/google/go-cmp v0.3.0 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff +github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value # github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf @@ -384,7 +407,7 @@ github.com/hashicorp/hil/parser github.com/hashicorp/hil/scanner # github.com/hashicorp/logutils v1.0.0 github.com/hashicorp/logutils -# github.com/hashicorp/terraform v0.12.0 +# github.com/hashicorp/terraform v0.12.1 github.com/hashicorp/terraform/plugin github.com/hashicorp/terraform/flatmap github.com/hashicorp/terraform/helper/customdiff @@ -612,20 +635,28 @@ golang.org/x/text/transform golang.org/x/text/width golang.org/x/text/secure/bidirule golang.org/x/text/unicode/bidi -# golang.org/x/tools v0.0.0-20190314010720-f0bfdbff1f9c -golang.org/x/tools/go/loader +# golang.org/x/tools v0.0.0-20190510151030-63859f3815cb +golang.org/x/tools/go/analysis/multichecker +golang.org/x/tools/go/analysis +golang.org/x/tools/go/analysis/passes/inspect +golang.org/x/tools/go/ast/inspector +golang.org/x/tools/go/analysis/internal/analysisflags +golang.org/x/tools/go/analysis/internal/checker +golang.org/x/tools/go/analysis/unitchecker golang.org/x/tools/go/packages +golang.org/x/tools/go/analysis/internal/facts +golang.org/x/tools/go/loader golang.org/x/tools/go/ssa golang.org/x/tools/go/ssa/ssautil -golang.org/x/tools/go/ast/astutil -golang.org/x/tools/go/buildutil 
-golang.org/x/tools/go/internal/cgo golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/semver +golang.org/x/tools/go/types/objectpath +golang.org/x/tools/go/ast/astutil +golang.org/x/tools/go/buildutil +golang.org/x/tools/go/internal/cgo golang.org/x/tools/go/types/typeutil -golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/passes/asmdecl golang.org/x/tools/go/analysis/passes/assign golang.org/x/tools/go/analysis/passes/atomic @@ -654,8 +685,6 @@ golang.org/x/tools/go/internal/gcimporter golang.org/x/tools/internal/fastwalk golang.org/x/tools/go/analysis/passes/buildssa golang.org/x/tools/go/analysis/passes/internal/analysisutil -golang.org/x/tools/go/analysis/passes/inspect -golang.org/x/tools/go/ast/inspector golang.org/x/tools/go/analysis/passes/ctrlflow golang.org/x/tools/go/cfg golang.org/x/tools/internal/module diff --git a/website/docs/d/msk_cluster.html.markdown b/website/docs/d/msk_cluster.html.markdown index 942604ccb8e..262e6bd359b 100644 --- a/website/docs/d/msk_cluster.html.markdown +++ b/website/docs/d/msk_cluster.html.markdown @@ -30,6 +30,7 @@ In addition to all arguments above, the following attributes are exported: * `arn` - Amazon Resource Name (ARN) of the MSK cluster. * `bootstrap_brokers` - A comma separated list of one or more hostname:port pairs of Kafka brokers suitable to boostrap connectivity to the Kafka cluster. +* `bootstrap_brokers_tls` - A comma separated list of one or more DNS names (or IPs) and TLS port pairs kafka brokers suitable to boostrap connectivity to the kafka cluster. * `kafka_version` - Apache Kafka version. * `number_of_broker_nodes` - Number of broker nodes in the cluster. * `tags` - Map of key-value pairs assigned to the cluster. 
diff --git a/website/docs/r/cloudtrail.html.markdown b/website/docs/r/cloudtrail.html.markdown index 4b59db80e97..74d5593718a 100644 --- a/website/docs/r/cloudtrail.html.markdown +++ b/website/docs/r/cloudtrail.html.markdown @@ -22,6 +22,8 @@ Enable CloudTrail to capture all compatible management events in region. For capturing events from services like IAM, `include_global_service_events` must be enabled. ```hcl +data "aws_caller_identity" "current" {} + resource "aws_cloudtrail" "foobar" { name = "tf-trail-foobar" s3_bucket_name = "${aws_s3_bucket.foo.id}" @@ -53,7 +55,7 @@ resource "aws_s3_bucket" "foo" { "Service": "cloudtrail.amazonaws.com" }, "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::tf-test-trail/*", + "Resource": "arn:aws:s3:::tf-test-trail/prefix/AWSLogs/${data.aws_caller_identity.current.account_id}/*", "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" diff --git a/website/docs/r/cloudwatch_event_target.html.markdown b/website/docs/r/cloudwatch_event_target.html.markdown index 32819d92ff6..a9d71726ead 100644 --- a/website/docs/r/cloudwatch_event_target.html.markdown +++ b/website/docs/r/cloudwatch_event_target.html.markdown @@ -217,7 +217,7 @@ resource "aws_cloudwatch_event_target" "ecs_scheduled_task" { rule = "${aws_cloudwatch_event_rule.every_hour.name}" role_arn = "${aws_iam_role.ecs_events.arn}" - ecs_target = { + ecs_target { task_count = 1 task_definition_arn = "${aws_ecs_task_definition.task_name.arn}" } diff --git a/website/docs/r/codepipeline_webhook.markdown b/website/docs/r/codepipeline_webhook.markdown index f0004270334..071da22e1cf 100644 --- a/website/docs/r/codepipeline_webhook.markdown +++ b/website/docs/r/codepipeline_webhook.markdown @@ -96,7 +96,7 @@ resource "github_repository_webhook" "bar" { configuration { url = "${aws_codepipeline_webhook.bar.url}" - content_type = "form" + content_type = "json" insecure_ssl = true secret = "${local.webhook_secret}" } diff --git 
a/website/docs/r/msk_cluster.html.markdown b/website/docs/r/msk_cluster.html.markdown index 3a50d18eaa8..37aebe6acf4 100644 --- a/website/docs/r/msk_cluster.html.markdown +++ b/website/docs/r/msk_cluster.html.markdown @@ -10,8 +10,6 @@ description: |- Manages AWS Managed Streaming for Kafka cluster -~> **NOTE:** This AWS service is in Preview and may change before General Availability release. Backwards compatibility is not guaranteed between Terraform AWS Provider releases. - ## Example Usage ```hcl @@ -88,19 +86,17 @@ output "bootstrap_brokers" { The following arguments are supported: -* `broker_node_group_info` - (Required) Nested data for configuring the broker nodes of the Kafka cluster. +* `broker_node_group_info` - (Required) Configuration block for the broker nodes of the Kafka cluster. * `cluster_name` - (Required) Name of the MSK cluster. * `kafka_version` - (Required) Specify the desired Kafka software version. * `number_of_broker_nodes` - (Required) The desired total number of broker nodes in the kafka cluster. It must be a multiple of the number of specified client subnets. -* `encryption_info` - (Optional) Nested data for specifying encryption at rest info. See below. +* `client_authentication` - (Optional) Configuration block for specifying a client authentication. See below. +* `configuration_info` - (Optional) Configuration block for specifying a MSK Configuration to attach to Kafka brokers. See below. +* `encryption_info` - (Optional) Configuration block for specifying encryption. See below. * `enhanced_monitoring` - (Optional) Specify the desired enhanced MSK CloudWatch monitoring level. 
See [Monitoring Amazon MSK with Amazon CloudWatch](https://docs.aws.amazon.com/msk/latest/developerguide/monitoring.html) * `tags` - (Optional) A mapping of tags to assign to the resource -**encryption_info** supports the following attributes: - -* `encryption_at_rest_kms_key_arn` - (Optional) You may specify a KMS key short ID or ARN (it will always output an ARN) to use for encrypting your data at rest. If no key is specified, an AWS managed KMS ('aws/msk' managed service) key will be used for encrypting the data at rest. - -**broker_node_group_info** supports the following attributes: +### broker_node_group_info Argument Reference * `client_subnets` - (Required) A list of subnets to connect to in client VPC ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-prop-brokernodegroupinfo-clientsubnets)). * `ebs_volume_size` - (Required) The size in GiB of the EBS volume for the data drive on each broker node. @@ -108,12 +104,37 @@ The following arguments are supported: * `security_groups` - (Required) A list of the security groups to associate with the elastic network interfaces to control who can communicate with the cluster. * `az_distribution` - (Optional) The distribution of broker nodes across availability zones ([documentation](https://docs.aws.amazon.com/msk/1.0/apireference/clusters.html#clusters-model-brokerazdistribution)). Currently the only valid value is `DEFAULT`. +### client_authentication Argument Reference + +* `tls` - (Optional) Configuration block for specifying TLS client authentication. See below. + +#### client_authenication tls Argument Reference + +* `certificate_authority_arns` - (Optional) List of ACM Certificate Authority Amazon Resource Names (ARNs). + +### configuration_info Argument Reference + +* `arn` - (Required) Amazon Resource Name (ARN) of the MSK Configuration to use in the cluster. +* `revision` - (Required) Revision of the MSK Configuration to use in the cluster. 
+ +### encryption_info Argument Reference + +* `encryption_in_transit` - (Optional) Configuration block to specify encryption in transit. See below. +* `encryption_at_rest_kms_key_arn` - (Optional) You may specify a KMS key short ID or ARN (it will always output an ARN) to use for encrypting your data at rest. If no key is specified, an AWS managed KMS ('aws/msk' managed service) key will be used for encrypting the data at rest. + +#### encryption_info encryption_in_transit Argument Reference + +* `client_broker` - (Optional) Encryption setting for data in transit between clients and brokers. Valid values: `TLS`, `TLS_PLAINTEXT`, and `PLAINTEXT`. Default value: `TLS_PLAINTEXT`. +* `in_cluster` - (Optional) Whether data communication among broker nodes is encrypted. Default value: `true`. + ## Attributes Reference In addition to all arguments above, the following attributes are exported: * `arn` - Amazon Resource Name (ARN) of the MSK cluster. * `bootstrap_brokers` - A comma separated list of one or more hostname:port pairs of kafka brokers suitable to boostrap connectivity to the kafka cluster. +* `bootstrap_brokers_tls` - A comma separated list of one or more DNS names (or IPs) and TLS port pairs kafka brokers suitable to boostrap connectivity to the kafka cluster. +* `current_version` - Current version of the MSK Cluster used for updates, e.g. `K13V1IB3VIYZZH` * `encryption_info.0.encryption_at_rest_kms_key_arn` - The ARN of the KMS key used for encryption at rest of the broker data volumes. * `zookeeper_connect_string` - A comma separated list of one or more IP:port pairs to use to connect to the Apache Zookeeper cluster. diff --git a/website/docs/r/msk_configuration.html.markdown b/website/docs/r/msk_configuration.html.markdown index 3fd991a549b..ed24ccdd010 100644 --- a/website/docs/r/msk_configuration.html.markdown +++ b/website/docs/r/msk_configuration.html.markdown @@ -12,8 +12,6 @@ Manages an Amazon Managed Streaming for Kafka configuration. 
More information ca ~> **NOTE:** The API does not support deleting MSK configurations. Removing this Terraform resource will only remove the Terraform state for it. -~> **NOTE:** This AWS service is in Preview and may change before General Availability release. Backwards compatibility is not guaranteed between Terraform AWS Provider releases. - ## Example Usage ```hcl diff --git a/website/docs/r/route53_record.html.markdown b/website/docs/r/route53_record.html.markdown index 4c6696b2cb3..9d6a0751295 100644 --- a/website/docs/r/route53_record.html.markdown +++ b/website/docs/r/route53_record.html.markdown @@ -90,6 +90,31 @@ resource "aws_route53_record" "www" { } ``` +### NS and SOA Record Management + +When creating Route 53 zones, the `NS` and `SOA` records for the zone are automatically created. Enabling the `allow_overwrite` argument will allow managing these records in a single Terraform run without the requirement for `terraform import`. + +```hcl +resource "aws_route53_zone" "example" { + name = "test.example.com" +} + +resource "aws_route53_record" "example" { + allow_overwrite = true + name = "test.example.com" + ttl = 30 + type = "NS" + zone_id = "${aws_route53_zone.example.zone_id}" + + records = [ + "${aws_route53_zone.example.name_servers.0}", + "${aws_route53_zone.example.name_servers.1}", + "${aws_route53_zone.example.name_servers.2}", + "${aws_route53_zone.example.name_servers.3}", + ] +} +``` + ## Argument Reference The following arguments are supported: diff --git a/website/docs/r/ssm_activation.html.markdown b/website/docs/r/ssm_activation.html.markdown index 1eb716102db..a845a07b205 100644 --- a/website/docs/r/ssm_activation.html.markdown +++ b/website/docs/r/ssm_activation.html.markdown @@ -57,6 +57,7 @@ The following arguments are supported: In addition to all arguments above, the following attributes are exported: +* `id` - The activation ID. * `activation_code` - The code the system generates when it processes the activation. 
* `name` - The default name of the registered managed instance. * `description` - The description of the resource that was registered. diff --git a/website/docs/r/waf_sql_injection_match_set.html.markdown b/website/docs/r/waf_sql_injection_match_set.html.markdown index 8626251e317..b9c53b1eae6 100644 --- a/website/docs/r/waf_sql_injection_match_set.html.markdown +++ b/website/docs/r/waf_sql_injection_match_set.html.markdown @@ -30,7 +30,7 @@ resource "aws_waf_sql_injection_match_set" "sql_injection_match_set" { The following arguments are supported: -* `name` - (Required) The name or description of the SizeConstraintSet. +* `name` - (Required) The name or description of the SQL Injection Match Set. * `sql_injection_match_tuples` - (Optional) The parts of web requests that you want AWS WAF to inspect for malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header. ## Nested Blocks